Example #1
    def __init__(self,
                 n_inputs,
                 n_outputs,
                 kernel_size,
                 stride,
                 dilation,
                 padding,
                 dropout=0.2):
        super(TemporalBlock, self).__init__()
        self.conv1 = weight_norm(
            nn.Conv1d(n_inputs,
                      n_outputs,
                      kernel_size,
                      stride=stride,
                      padding=padding,
                      dilation=dilation))
        # self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.RReLU()
        self.dropout1 = nn.Dropout(dropout)

        self.conv2 = weight_norm(
            nn.Conv1d(n_outputs,
                      n_outputs,
                      kernel_size,
                      stride=stride,
                      padding=padding,
                      dilation=dilation))
        # self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.RReLU()
        self.dropout2 = nn.Dropout(dropout)

        # self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
        #                          self.conv2, self.chomp2, self.relu2, self.dropout2)
        self.net = nn.Sequential(self.conv1, self.relu1, self.dropout1,
                                 self.conv2, self.relu2, self.dropout2)
        self.downsample = nn.Conv1d(n_inputs, n_outputs,
                                    1) if n_inputs != n_outputs else None
        self.relu = nn.RReLU()
        self.init_weights()
Example #2
    def __init__(self,
                 state_size,
                 action_size,
                 fc_sizes=None,
                 actor_fc_sizes=[256, 128, 64],
                 critic_fc_sizes=[256, 128, 64]):
        super(ActorCritic, self).__init__()
        # If the size of the common layers is specified, create them;
        # if not, set it to None to flag that the actor and critic networks are separate.
        if (fc_sizes != None and (len(fc_sizes) >= 1)):
            sequence_dict = OrderedDict()
            sequence_dict['fc0'] = nn.Linear(state_size, fc_sizes[0])
            sequence_dict['fc_rrelu0'] = nn.RReLU()
            for i, fc_size in enumerate(fc_sizes):
                if (i == len(fc_sizes) - 1):
                    break
                sequence_dict['fc{}'.format(i + 1)] = nn.Linear(
                    fc_size, fc_sizes[i + 1])
                sequence_dict['fc_rrelu{}'.format(i + 1)] = nn.RReLU()

            self.fc_common = nn.Sequential(sequence_dict)
        else:
            self.fc_common = None

        if (self.fc_common != None):
            self.actor = Actor(fc_sizes[-1], action_size, actor_fc_sizes)
            self.critic = Critic(fc_sizes[-1], action_size, critic_fc_sizes)
        else:
            self.actor = Actor(state_size, action_size, actor_fc_sizes)
            self.critic = Critic(state_size, action_size, critic_fc_sizes)

        #weight initialization using xavier initializer
        if (self.fc_common != None):
            self.fc_common.apply(ActorCritic.init_weights)
        self.critic.critic_first_layer.apply(ActorCritic.init_weights)
        self.actor.fc_actor.apply(ActorCritic.init_weights)
        self.critic.fc_critic.apply(ActorCritic.init_weights)

        self.batchnorm = nn.BatchNorm1d(100)
Example #3
def get_activation(args):
    if args.activation == 'leaky_relu':
        return nn.LeakyReLU(args.leaky_relu)
    elif args.activation == 'rrelu':
        return nn.RReLU()
    elif args.activation == 'relu':
        return nn.ReLU()
    elif args.activation == 'elu':
        return nn.ELU()
    elif args.activation == 'prelu':
        return nn.PReLU()
    elif args.activation == 'selu':
        return nn.SELU()
Example #4
 def _init_fc(self):
     layers_fc = []
     in_channels_fc = 256 * 2 * 2
     for i in range(2):
         layers_fc += [
             nn.Dropout(p=0.85),
             nn.Linear(in_channels_fc, fc_channel_nums[i]),
             nn.BatchNorm1d(fc_channel_nums[i]),
             nn.RReLU(0.1, 0.3, inplace=True)
         ]
         in_channels_fc = fc_channel_nums[i]
     layers_fc += [nn.Linear(fc_channel_nums[1], self.num_classes)]
     return nn.Sequential(*layers_fc)
Example #5
def create_classifier(input_size_, hidden_layers_, output_size_ = 102, drop_p_ = 0.5):
    dict = OrderedDict()
    
    # Input to a hidden layer, the first layer
    dict['fc0'] = nn.Linear(input_size_, hidden_layers_[0])
    dict['relu0'] = nn.RReLU(inplace=True)
    dict['dropout0'] = nn.Dropout(p=drop_p_)
    
    # Add a variable number of more hidden_layers
    layer_inx = 1
    layer_sizes = zip(hidden_layers_[:-1], hidden_layers_[1:])
    for layer_size in layer_sizes:
        dict['fc'+str(layer_inx)] = nn.Linear(layer_size[0], layer_size[1])
        dict['relu'+str(layer_inx)] = nn.RReLU(inplace=True)
        dict['dropout'+str(layer_inx)] = nn.Dropout(p=drop_p_)
        # Next layer index
        layer_inx += 1
        
    dict['fc'+str(layer_inx)] = nn.Linear(layer_size[-1], output_size_)
    dict['output'] = nn.LogSoftmax(dim = 1)
    
    return nn.Sequential(dict)
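A possible usage of create_classifier above (hypothetical sizes, not from the source). Note that hidden_layers_ needs at least two entries, since the final Linear reads its input width from the last pair produced by zip():

# Hypothetical usage, assuming torch.nn, OrderedDict and the function above are in scope
classifier = create_classifier(input_size_=25088,
                               hidden_layers_=[4096, 512],
                               output_size_=102,
                               drop_p_=0.5)
# -> Sequential: fc0 -> relu0 -> dropout0 -> fc1 -> relu1 -> dropout1 -> fc2 -> LogSoftmax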
Example #6
 def __init__(self, input_data, output_data):
     super(ConvTransBlock, self).__init__()
     self.conv1 = nn.Sequential(
         nn.ConvTranspose3d(input_data,
                            output_data,
                            kernel_size=3,
                            stride=2,
                            padding=1,
                            output_padding=1,
                            dilation=1),
         nn.BatchNorm3d(output_data),
         nn.RReLU(inplace=True),
     )
Example #7
 def __init__(self, fc_dim1, fc_dim2, granularity='word'):
     super(FastText, self).__init__()
     embed_mat = torch.from_numpy(
         np.load(f'../../data/{granularity}_embed_mat.npy').astype(
             np.float32))
     num_word, embed_dim = embed_mat.size()
     self.embed = nn.Embedding.from_pretrained(embed_mat, False)
     self.fc1 = nn.Linear(embed_dim, fc_dim1)
     self.fc2 = nn.Linear(fc_dim1, fc_dim2)
     self.out = nn.Linear(fc_dim2, 19)
     self.act = nn.RReLU()
     self.bn1 = nn.BatchNorm1d(fc_dim1)
     self.bn2 = nn.BatchNorm1d(fc_dim2)
Example #8
 def __init__(self, input_size, num_classes):
     super(corr_nn, self).__init__()
     self.fc1 = nn.Linear(input_size, 32)
     self.relu = nn.ReLU()
     self.sig = nn.Sigmoid()
     self.tanh = nn.Tanh()
     self.rrelu = nn.RReLU()
     self.leakyRelu = nn.LeakyReLU()
     self.fc2 = nn.Linear(32, 64)
     self.fc3 = nn.Linear(64, num_classes)
     self.dropout = nn.Dropout(p=0.2)
     self.bn1 = nn.BatchNorm1d(64)
     self.logsoftmax = nn.LogSoftmax(dim=1)
Example #9
    def __init__(self, N_word, N_h, N_depth, max_tok_num, use_ca, use_sel_cnn,
                 filter_num):
        super(SelPredictor, self).__init__()
        self.use_ca = use_ca
        self.use_sel_cnn = use_sel_cnn
        self.filter_num = filter_num
        self.max_tok_num = max_tok_num
        if use_sel_cnn:
            self.sel_conv = nn.Sequential(
                nn.Conv2d(in_channels=1,
                          out_channels=self.filter_num,
                          kernel_size=(7, N_word),
                          stride=(1, 1),
                          padding=(3, 0)), nn.BatchNorm2d(self.filter_num),
                nn.RReLU())
            self.sel_dropout = nn.Dropout2d(p=0.5)
            self.sel_name_enc = nn.LSTM(input_size=N_word,
                                        hidden_size=int(self.filter_num / 2),
                                        num_layers=N_depth,
                                        batch_first=True,
                                        dropout=0.5,
                                        bidirectional=True)
            self.sel_att = nn.Linear(self.filter_num, 1)

            self.sel_out_K = nn.Linear(self.filter_num, self.filter_num)
            self.sel_out_col = nn.Linear(self.filter_num, self.filter_num)
            self.sel_out = nn.Linear(self.filter_num, 1)
        else:
            self.sel_lstm = nn.LSTM(input_size=N_word,
                                    hidden_size=int(N_h / 2),
                                    num_layers=N_depth,
                                    batch_first=True,
                                    dropout=0.3,
                                    bidirectional=True)
            if use_ca:
                print("Using column attention on selection predicting")
                self.sel_att = nn.Linear(N_h, N_h)
            else:
                print("Not using column attention on selection predicting")
                self.sel_att = nn.Linear(N_h, 1)
            self.sel_col_name_enc = nn.LSTM(input_size=N_word,
                                            hidden_size=int(N_h / 2),
                                            num_layers=N_depth,
                                            batch_first=True,
                                            dropout=0.3,
                                            bidirectional=True)
            self.sel_out_K = nn.Linear(N_h, N_h)
            self.sel_out_col = nn.Linear(N_h, N_h)
            self.sel_out = nn.Sequential(nn.Tanh(), nn.Linear(N_h, 1))

        self.softmax = nn.Softmax()
Example #10
  def __init__(self):
    super(CaptchaModel, self).__init__()

    self.conv1 = nn.Sequential(
      nn.Conv2d(3, 16, 5),
      nn.MaxPool2d(3, 3),
      nn.BatchNorm2d(16),
      nn.RReLU()
    )

    self.conv2 = nn.Sequential(
      nn.Conv2d(16, 32, 3),
      nn.MaxPool2d(3, 3),
      nn.BatchNorm2d(32),
      nn.RReLU()
    )

    self.conv3 = nn.Sequential(
      nn.Conv2d(32, 64, 3),
      nn.MaxPool2d(2, 2),
      nn.BatchNorm2d(64),
      nn.RReLU(),
      nn.Flatten(),
      nn.Dropout(0.15)
    )

    self.dense1 = nn.Sequential(
      nn.Linear(576, 128),
      nn.BatchNorm1d(128),
      nn.RReLU()
    )

    self.dropout = nn.Dropout(0.1)

    self.out1 = nn.Linear(128, 62)
    self.out2 = nn.Linear(128, 62)
    self.out3 = nn.Linear(128, 62)
    self.out4 = nn.Linear(128, 62)
Example #11
    def __init__(self,
                 input_data=256,
                 output_data=256,
                 dense_features=(10, 12, 8),
                 stride=2,
                 kernel_size=3,
                 padding=1,
                 activation="relu",
                 normalization="group"):
        super(VDResampling, self).__init__()

        midput_data = input_data // 2
        self.dense_features = dense_features
        # print('self.dense_features: {}'.format(self.dense_features))

        self.gn1 = nn.GroupNorm(num_groups=8, num_channels=input_data)
        if activation == 'relu':
            self.actv1 = nn.ReLU(inplace=True)
            self.actv2 = nn.ReLU(inplace=True)
        elif activation == 'rrelu':
            self.actv1 = nn.RReLU(inplace=True)
            self.actv2 = nn.RReLU(inplace=True)

        self.conv1 = nn.Conv3d(in_channels=input_data,
                               out_channels=16,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding)
        self.dense1 = nn.Linear(in_features=16 * self.dense_features[0] *
                                self.dense_features[1] *
                                self.dense_features[2],
                                out_features=input_data)
        self.dense2 = nn.Linear(
            in_features=midput_data,
            out_features=midput_data * self.dense_features[0] *
            self.dense_features[1] * self.dense_features[2])

        self.up0 = LinearUpSampling(midput_data, output_data)
Example #12
 def convRelu(i, batchNormalization=False):
     nIn = nc if i == 0 else nm[i - 1]
     nOut = nm[i]
     cnn.add_module('conv{0}'.format(i),
                    nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i]))
     if batchNormalization:
         cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut))
     if leakyRelu:
         cnn.add_module('relu{0}'.format(i),
                        nn.LeakyReLU(0.1, inplace=True))
     elif RRelu:
         cnn.add_module('relu{0}'.format(i), nn.RReLU(inplace=True))
     else:
         cnn.add_module('relu{0}'.format(i), nn.ReLU(True))
Example #13
 def __init__(self,
              input_dim=3000,
              noise_dim=0,
              output_dim=128,
              H=64,
              W=64):
     super(LSTM_Embedding, self).__init__()
     self.input_dim = input_dim
     self.noise_dim = noise_dim
     self.output_dim = output_dim
     self.H = H
     self.W = W
     self.fc = nn.Linear(input_dim + noise_dim, output_dim * H * W)
     self.leaky_relu = nn.RReLU()
Example #14
    def __init__(self, training=False):
        super(Pnet, self).__init__()

        self.training = training
        self.basenet = nn.Sequential(
            nn.Conv2d(in_channels=3,
                      out_channels=10,
                      kernel_size=3,
                      stride=1,
                      padding=0,
                      bias=True),
            nn.PReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            nn.Conv2d(in_channels=10,
                      out_channels=16,
                      kernel_size=3,
                      stride=1,
                      padding=0,
                      bias=True),
            nn.RReLU(),
            nn.Conv2d(in_channels=16,
                      out_channels=32,
                      kernel_size=3,
                      stride=1,
                      padding=0,
                      bias=True),
            nn.PReLU(),
        )

        self.conv4_1 = nn.Conv2d(in_channels=32,
                                 out_channels=1,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0,
                                 bias=True)
        self.conv4_2 = nn.Conv2d(in_channels=32,
                                 out_channels=4,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0,
                                 bias=True)
        if USE_LANDMARK:
            self.conv4_3 = nn.Conv2d(in_channels=32,
                                     out_channels=10,
                                     kernel_size=1,
                                     stride=1,
                                     padding=0,
                                     bias=True)

        self.loss_func = Lossfunc()
Example #15
 def __init__(self, nl, hd, ba):
     super(Classifier_LSTM, self).__init__()
     self.nl = nl
     self.hd = hd
     self.ba = ba
     self.lstm1 = nn.LSTM(wab.INPUT_DIM,
                          hd,
                          num_layers=nl,
                          bias=True,
                          batch_first=True)
     self.relu = nn.RReLU()
     self.drop = nn.Dropout(p=wab.DROPOUT)
     self.fc = nn.Linear(hd, wab.NUM_CLASSES)
     self.sig = nn.Sigmoid()
Example #16
    def Activation (self,Activation):

        if Activation == 'PRELU1':

            return PReLU.PRELU()

        elif Activation == 'PRELU2':

            return PReLU.PRELU2()

        elif Activation == 'PRELU3':

            return PReLU.PRELU3()

        elif Activation == 'PRELU4':

            return PReLU.PRELU4()

        elif Activation == 'PRELU5':

            return PReLU.PRELU5()

        elif Activation == 'PRELU6':

            return PReLU.PRELU6()

        elif Activation == 'RELU':
            return nn.ReLU()

        elif Activation == 'RELU6':
            return nn.ReLU6()

        elif Activation == 'RRELU':
            return nn.RReLU()

        elif Activation == 'ELU':
            return nn.ELU()

        elif Activation == 'CELU':
            return nn.CELU()

        elif Activation == 'SELU':
            return nn.SELU()

        elif Activation == 'LRELU':
            return nn.LeakyReLU()

        elif Activation == 'PRELU':
            return nn.LeakyReLU()
Example #17
def get_activation(activation):
    if isinstance(activation, str):
        if activation == 'relu':
            return nn.ReLU()
        elif activation == 'leaky':
            return nn.LeakyReLU(negative_slope=0.1)
        elif activation == 'prelu':
            return nn.PReLU(num_parameters=1)
        elif activation == 'rrelu':
            return nn.RReLU()
        elif activation == 'lin':
            return nn.Identity()
    else:
        # Deep copy is necessary in the case of parametrized activations
        return copy.deepcopy(activation)
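The deep-copy branch matters because parametrized activations such as nn.PReLU carry learnable weights: passing the same module object to several call sites would make them share (and co-train) one slope parameter. A minimal sketch, assuming the get_activation above (and its copy import) is in scope:

import torch.nn as nn

shared = nn.PReLU(num_parameters=1)
a = get_activation(shared)       # independent deep copy
b = get_activation(shared)       # another independent copy
print(a.weight is b.weight)      # False: each copy trains its own slope
# Reusing the same object instead would tie the parameter across layers:
net = nn.Sequential(nn.Linear(4, 4), shared, nn.Linear(4, 4), shared)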
Example #18
    def decoder(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, bias=True,final=True):

        if final:
            layer = nn.Sequential(
                nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
                                   output_padding=output_padding, bias=bias),
                nn.Softmax()
            )
        else:
            layer = nn.Sequential(
                nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
                                   output_padding=output_padding, bias=bias),
                nn.RReLU()
            )
        return layer
Example #19
    def __init__(self):
        super(FaceCNN, self).__init__()

        # First convolution and pooling
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1,
                      out_channels=64,
                      kernel_size=3,
                      stride=1,
                      padding=1),  # convolutional layer
            nn.BatchNorm2d(num_features=64),  # normalization
            nn.RReLU(inplace=True),  # activation function
            nn.MaxPool2d(kernel_size=2, stride=2),  # max pooling
        )

        # Second convolution and pooling
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=64,
                      out_channels=128,
                      kernel_size=3,
                      stride=1,
                      padding=1),
            nn.BatchNorm2d(num_features=128),
            nn.RReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

        # Third convolution and pooling
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels=128,
                      out_channels=256,
                      kernel_size=3,
                      stride=1,
                      padding=1),
            nn.BatchNorm2d(num_features=256),
            nn.RReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

        # Parameter initialization
        self.conv1.apply(gaussian_weights_init)
        self.conv2.apply(gaussian_weights_init)
        self.conv3.apply(gaussian_weights_init)

        # Fully connected layers
        self.fc = nn.Sequential(
            nn.Dropout(p=0.2),
            nn.Linear(in_features=256 * 6 * 6, out_features=4096),
            nn.RReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(in_features=4096, out_features=1024),
            nn.RReLU(inplace=True),
            nn.Linear(in_features=1024, out_features=256),
            nn.RReLU(inplace=True),
            nn.Linear(in_features=256, out_features=7),
        )
Example #20
 def __init__(self):
     self.best_step = 1000
     self.activations = {
         'sigmoid': nn.Sigmoid(),
         'custom': self.custom,
         'relu': nn.ReLU(),
         'relu6': nn.ReLU6(),
         'rrelu0103': nn.RReLU(0.1, 0.3),
         'rrelu0205': nn.RReLU(0.2, 0.5),
         'htang1': nn.Hardtanh(-1, 1),
         'htang2': nn.Hardtanh(-2, 2),
         'htang3': nn.Hardtanh(-3, 3),
         'tanh': nn.Tanh(),
         'elu': nn.ELU(),
         'selu': nn.SELU(),
         'hardshrink': nn.Hardshrink(),
         'leakyrelu01': nn.LeakyReLU(0.1),
         'leakyrelu001': nn.LeakyReLU(0.01),
         'logsigmoid': nn.LogSigmoid(),
         'prelu': nn.PReLU(),
     }
     self.learning_rate = 1.0
     self.hidden_size = 11
     self.activation_hidden = 'relu'
     self.activation_output = 'sigmoid'
     self.sols = {}
     self.solsSum = {}
     self.random = 0
     self.random_grid = [_ for _ in range(10)]
     self.hidden_size_grid = [3, 5, 7, 11]
     self.learning_rate_grid = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]
     #self.learning_rate_grid = [1.0 + i/100.0 for i in range(10)]
     self.activation_hidden_grid = self.activations.keys()
     #self.activation_output_grid = self.activations.keys()
     self.grid_search = GridSearch(self)
     self.grid_search.set_enabled(True)
Example #21
    def __init__(self):
        super(Action_Conditioned_FF, self).__init__()
        input_size = 6
        hidden_size = 20
        hidden_2_size = 26
        output_size = 1
        self.input_to_hidden = nn.Linear(input_size, hidden_size)
        self.h1_h2 = nn.Linear(hidden_size, hidden_2_size)

        self.nonlinear_activation = nn.RReLU()
        self.nonlinear_activation2 = nn.Sigmoid()
        self.hidden_to_output = nn.Linear(hidden_2_size, output_size)
        # ReLU here? Sigmoid? ReLU seems better (just realized it was never actually applied).
        # Current config is best so far; change the loss next.
        self.output_activation = nn.Softmax(dim=0)
Example #22
 def __init__(self, input_data, output_data):
     super(NewResUnet_ResBlock1, self).__init__()
     self.resblock = nn.Sequential(
         nn.BatchNorm3d(input_data),
         nn.RReLU(lower=0.125, upper=1.0 / 3, inplace=True),
         nn.Conv3d(input_data,
                   output_data,
                   kernel_size=3,
                   stride=1,
                   padding=1), nn.BatchNorm3d(output_data),
         nn.RReLU(lower=0.125, upper=1.0 / 3, inplace=True),
         nn.Conv3d(output_data,
                   output_data,
                   kernel_size=3,
                   stride=1,
                   padding=1))
     self.conv = nn.Sequential(
         nn.BatchNorm3d(input_data),
         nn.RReLU(lower=0.125, upper=1.0 / 3, inplace=True),
         nn.Conv3d(input_data,
                   output_data,
                   kernel_size=3,
                   stride=1,
                   padding=1))
Example #23
def get_activation_fn(name, channels):
    activation_fn_map = {
        'ELU': lambda: nn.ELU(),
        'ReLU': lambda: nn.ReLU(),
        'RReLU': lambda: nn.RReLU(),
        'PReLU': lambda: nn.PReLU(channels),
        'SELU': lambda: nn.SELU(),
        'CELU': lambda: nn.CELU(),
        'ReLU6': lambda: nn.ReLU6(),
        'Hardtanh': lambda: nn.Hardtanh(min_val=0.0, max_val=1.0),
        'Sigmoid': lambda: nn.Sigmoid(),
        'None': lambda: None
    }

    return activation_fn_map[name]()
Example #24
 def __init__(
     self,
     in_features,
     out_features=4,
     dropout=0,
 ):
     super(NNRegressorDropout, self).__init__()
     self.model = nn.Sequential(
          nn.Linear(in_features, 436),  # 85% of 512
          nn.RReLU(),
          nn.Dropout(0.2),
          nn.Linear(436, 436),  # learn a large number of features
          nn.RReLU(),
          nn.Dropout(0.3),
          nn.Linear(436, 380),  # 74% of 512
          nn.RReLU(),
          nn.Dropout(0.4),
          nn.Linear(380, 260),  # 60% of 512
          nn.RReLU(),
          nn.Dropout(0.2),
          nn.Linear(260, 102),  # about 20% of 512 (80-20 rule)
         nn.RReLU(),
         nn.Linear(102, 4),
     )
Example #25
    def __init__(self):
        super(TencentModel3, self).__init__()

        self.dropout = nn.Dropout(DROPOUT)

        self.conv_head_0 = conv1x1_layer(in_channels=556, out_channels=128)
        self.conv_head_1 = conv1x1_layer(in_channels=556, out_channels=128)
        self.conv_head_2 = conv1x1_layer(in_channels=556, out_channels=128)
        self.conv_head_3 = conv1x1_layer(in_channels=320, out_channels=128)
        self.conv_head_4 = conv1x1_layer(in_channels=24, out_channels=64)
        self.conv_head_5 = conv1x1_layer(in_channels=192, out_channels=128)
        self.conv_head_6 = conv1x1_layer(in_channels=40, out_channels=64)

        self.LSTM = nn.LSTM(input_size=768,
                            hidden_size=256,
                            bidirectional=True,
                            dropout=0.2,
                            num_layers=2,
                            batch_first=True)



        self.Res_conv_layer_1 = nn.Sequential(
                                              ResNeXtBlock(input_size =512 , output_size = 256, kernelsize =Kernel_Sizes_List[0], padding = 0 , \
                                                           expansion =2 , num_groups =32, is_short_cut = True),

                                             )

        self.Res_conv_layer_2 = nn.Sequential(
                                              ResNeXtBlock(input_size =512 , output_size = 256, kernelsize =Kernel_Sizes_List[1], padding = 1 , \
                                                           expansion =2 , num_groups =32, is_short_cut = True),
                                             )

        self.Res_conv_layer_3 = nn.Sequential(
                                              ResNeXtBlock(input_size =512 , output_size = 256, kernelsize =Kernel_Sizes_List[2], padding = 2 , \
                                                           expansion =2 , num_groups =32, is_short_cut = True),
                                             )

        self.Res_conv_layer_4 = nn.Sequential(
                                              ResNeXtBlock(input_size =512 , output_size = 256, kernelsize =Kernel_Sizes_List[3], padding = 3 , \
                                                           expansion =2 , num_groups =32, is_short_cut = True),
                                             )

        #1024,256
        self.fc_final = nn.Sequential(nn.Linear(1024, 256),
                                      nn.BatchNorm1d(256),
                                      nn.RReLU(inplace=True),
                                      nn.Linear(256, NUM_CLASS))
Example #26
    def __init__(self, input_size):
        super(CNN_Model, self).__init__()

        # Convolutional layers enable the model to recognize increasingly complex patterns.
        # For example, if the first convolutional layer lets the model recognize lines and curves,
        # the second lets it recognize shapes built from those lines and curves, such as
        # squares and circles.

        # The kernel size determines how much of the input each filter sees at once. The stride determines
        # how far the kernel moves between applications, and padding restores information that would otherwise
        # be lost at the edges: with kernel_size=31, padding=15 and stride=1, the output length equals the input length.
        self.conv1 = nn.Conv1d(input_size,
                               int((input_size + 1) * 2),
                               31,
                               padding=15,
                               stride=1)
        self.conv2 = nn.Conv1d(int((input_size + 1) * 2),
                               int((input_size + 1) * 4),
                               31,
                               padding=15,
                               stride=1)
        self.conv3 = nn.Conv1d(int((input_size + 1) * 4),
                               int((input_size + 1) * 8),
                               31,
                               padding=15,
                               stride=1)
        self.conv4 = nn.Conv1d(int((input_size + 1) * 8),
                               int((input_size + 1) * 16),
                               31,
                               padding=15,
                               stride=1)

        # Linear layers are simpler than convolutional ones. They won't make the model as expressive as
        # convolutional layers, but they are faster, and they are needed to produce the correct number of outputs.
        self.lin1 = nn.Linear(112 * 92, int((112 * 92) * ((2 / 3)**6)))
        self.lin2 = nn.Linear(int((112 * 92) * ((2 / 3)**6)),
                              int((112 * 92) * ((2 / 3)**12)))
        self.lin3 = nn.Linear(int((112 * 92) * ((2 / 3)**12)), 26)

        # Pooling smooths the layer outputs by keeping only the strongest local responses
        self.pool1 = nn.MaxPool1d(3, stride=1)
        self.pool2 = nn.MaxPool1d(3, stride=1)

        # Activation functions are extremely important: they transform the layer outputs into a usable, non-linear form.
        # The final activation is usually sigmoid for binary output or softmax for multiclass output. For this
        # particular task, randomized ReLU (RReLU) happened to work best.
        # Leaky ReLU is often a good default activation function to begin with.
        self.random_relu = nn.RReLU()
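As the comments above note, randomized ReLU worked best here. During training nn.RReLU samples the negative-branch slope uniformly from [lower, upper] for each element; in eval mode it uses the fixed slope (lower + upper) / 2. A minimal sketch of that behavior (values are illustrative):

import torch
import torch.nn as nn

act = nn.RReLU(lower=0.1, upper=0.3)
x = torch.tensor([-1.0, -0.5, 2.0])

act.train()
print(act(x))  # negative entries scaled by slopes drawn from U(0.1, 0.3); changes from run to run

act.eval()
print(act(x))  # negative entries scaled by the fixed slope (0.1 + 0.3) / 2 = 0.2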
Example #27
    def test_rrelu_module(self):
        xla_device = xm.xla_device()
        a = torch.rand(1, 2, 2, requires_grad=True)
        xla_a = a.to(xla_device).detach()
        xla_a.requires_grad = True

        m = nn.RReLU()
        xla_m = m.to(xla_device)

        output = m(a)
        xla_output = xla_m(xla_a)
        self.assertEqual(output, xla_output.cpu())

        output.sum().backward()
        xla_output.sum().backward()
        self.assertEqual(a.grad, xla_a.grad.cpu())
Example #28
    def __init__(self):
        super(amap_cnn, self).__init__()

        # First convolution and pooling
        self.conv1 = nn.Sequential(
            # in_channels: number of input channels; out_channels: number of output channels (i.e. number of kernels);
            # kernel_size; stride; padding: rows/columns of symmetric zero padding
            # input: (batch_size, 1, 48, 48), output: (batch_size, 64, 48, 48), (48-3+2*1)/1+1 = 48
            # input: (batch_size, 1, 64, 48), output: (batch_size, 64, 64, 48), (64-3+2*1)/1+1 = 64
            # input: (batch_size, 1, 256, 144), output: (batch_size, 64, 256, 144), (256-3+2*1)/1+1 = 256
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1),  # convolutional layer
            nn.BatchNorm2d(num_features=64),  # normalization
            nn.RReLU(inplace=True),  # activation function
            # output: (batch_size, 64, 128, 72)
            nn.MaxPool2d(kernel_size=2, stride=2),  # max pooling
        )

        # Second convolution and pooling
        self.conv2 = nn.Sequential(
            # input: (batch_size, 64, 32, 24), output: (batch_size, 128, 32, 24), (32-3+2*1)/1+1 = 32
            # input: (batch_size, 64, 128, 72), output: (batch_size, 128, 128, 72), (128-3+2*1)/1+1 = 128
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=128),  # normalization
            nn.RReLU(inplace=True),
            # output: (batch_size, 128, 64, 36)
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

        # Third convolution and pooling
        self.conv3 = nn.Sequential(
            # input: (batch_size, 128, 64, 36), output: (batch_size, 256, 64, 36), (64-3+2*1)/1+1 = 64
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=256),  # normalization
            nn.RReLU(inplace=True),
            # output: (batch_size, 256, 32, 18)
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

        # Parameter initialization
        self.conv1.apply(gaussian_weights_init)
        self.conv2.apply(gaussian_weights_init)
        self.conv3.apply(gaussian_weights_init)

        # Fully connected layers
        self.fc = nn.Sequential(
            nn.Dropout(p=0.2),
            nn.Linear(in_features=256 * 8 * 6, out_features=4096),
            nn.RReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(in_features=4096, out_features=1024),
            nn.RReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(in_features=1024, out_features=256),
            nn.RReLU(inplace=True),
            nn.Linear(in_features=256, out_features=3),
        )
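The shape comments above mention several candidate input sizes; the fc layer's in_features of 256 * 8 * 6 is consistent with a 64x48 input, since each 3x3 convolution with padding=1 preserves the spatial size and each MaxPool2d(2, 2) halves it. A quick arithmetic check (the 64x48 input size is an assumption, not stated in the snippet):

h, w = 64, 48
for _ in range(3):           # three conv blocks, each ending in MaxPool2d(kernel_size=2, stride=2)
    h, w = h // 2, w // 2    # the 3x3, padding=1 convolutions keep h and w unchanged
print(256 * h * w)           # 256 * 8 * 6 = 12288, matching nn.Linear(in_features=256 * 8 * 6, ...)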
Example #29
 def __init__(self, filter_sizes, num_filter, fc_dim1):
     super(TextCNN, self).__init__()
     embed_mat = torch.from_numpy(
         np.load('../../data/word_embed_mat.npy').astype(np.float32))
     num_word, embed_dim = embed_mat.size()
     self.embed = nn.Embedding.from_pretrained(embed_mat, freeze=False)
     self.conv = nn.ModuleList([
         nn.Conv2d(1, num_filter, (size, embed_dim), bias=False)
         for size in filter_sizes
     ])
     self.act = nn.RReLU()
     self.fc = nn.Linear(len(filter_sizes) * num_filter, fc_dim1)
     self.out = nn.Linear(fc_dim1, 19)
     self.bn1 = nn.BatchNorm1d(len(filter_sizes) * num_filter)
     self.bn2 = nn.BatchNorm1d(fc_dim1)
     self._initialize_weights()
Example #30
 def __init__(self,
              in_channels,
              out_channels,
              ks=3,
              pd=1,
              do=0.0,
              **kwargs):
     super(base_conv1d, self).__init__()
     self.conv = nn.Conv1d(in_channels,
                           out_channels,
                           bias=False,
                           kernel_size=ks,
                           padding=pd,
                           **kwargs)
     self.activation = nn.RReLU(inplace=True)
     self.conv.weight.data.normal_(0, 0.01)