Example No. 1
def __init__(self):
    self.mid_dim = 14
    self.num_class = 2
    super().__init__()
    # two hidden Linear layers, each followed by BatchNorm1d, plus a classifier head
    self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
    self.bn0 = M.BatchNorm1d(self.mid_dim)
    self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
    self.bn1 = M.BatchNorm1d(self.mid_dim)
    self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
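Only the constructor is shown above. A minimal forward pass for such a module might look like the sketch below (an assumption, not the original code), using `megengine.functional` for the activation:

import megengine.functional as F

def forward(self, x):
    # Linear -> BatchNorm1d -> ReLU for both hidden layers, then the classifier head
    x = F.relu(self.bn0(self.fc0(x)))
    x = F.relu(self.bn1(self.fc1(x)))
    return self.fc2(x)
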
Example No. 2
def __init__(self):
    self.mid_dim = 14
    self.num_class = 2
    super().__init__()
    self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
    self.bn0 = M.BatchNorm1d(self.mid_dim)
    self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
    self.bn1 = M.BatchNorm1d(self.mid_dim)
    self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
    # random test input: a batch of 12 samples with 2 features each
    self.data = np.random.random((12, 2)).astype(np.float32)
Example No. 3
def __init__(self, converter="normal"):
    self.converter = converter
    self.mid_dim = 14
    self.num_class = 2
    super().__init__()
    self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
    self.bn0 = M.BatchNorm1d(self.mid_dim)
    self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
    self.bn1 = M.BatchNorm1d(self.mid_dim)
    self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
    # deterministic test input: 12 samples x 2 features, values 0..23
    self.data = np.arange(24).reshape(12, 2).astype(np.float32)
Example No. 4
def __init__(self, mode):
    super().__init__()
    self.mode = mode
    # (N, C, L) input for BatchNorm1d and (N, C, H, W) input for BatchNorm2d
    self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
    self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
    self.bn1d = M.BatchNorm1d(32)
    self.bn2d = M.BatchNorm2d(3)
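The forward logic is not included above; presumably `mode` selects which normalization layer to exercise. A hedged sketch (the actual mode values are assumptions):

import megengine as mge

def forward(self):
    # hypothetical dispatch on self.mode (the values used in the original are not shown)
    if self.mode == "bn1d":
        return self.bn1d(mge.Tensor(self.data1))   # (N, C, L) = (1, 32, 32)
    return self.bn2d(mge.Tensor(self.data2))       # (N, C, H, W) = (20, 3, 24, 24)
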
Example No. 5
def __init__(self, transpose=False):
    super().__init__()
    self.transpose = transpose
    self.data = np.random.random((10, 100)).astype(np.float32)
    # the weight is stored as (out, in) or (in, out), depending on `transpose`
    weight = np.random.random((200, 100) if transpose else (100, 200))
    self.linear_weight = mge.Tensor(weight)
    self.bn = M.BatchNorm1d(200)
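A plausible forward pass, assuming the stored weight is applied with a matrix multiply that honours the `transpose` flag (the original forward is not shown):

import megengine.functional as F

def forward(self, x):
    # multiply by the stored weight, transposing it when it was created as (out, in)
    x = F.matmul(x, self.linear_weight, transpose_b=self.transpose)
    return self.bn(x)   # BatchNorm1d over the 200 output features
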
Example No. 6
def __init__(self, i, value_embedding, key_embedding):
    self.key_embedding = key_embedding
    super(TransformerBlock, self).__init__()
    # L is presumably a sequence-length constant defined elsewhere in the original module
    self.position_encoding = M.Linear(L, key_embedding)
    self.init_map = M.Linear(i, key_embedding)
    self.value_mapping = M.Linear(key_embedding, value_embedding)
    self.key_mapping = M.Linear(key_embedding, key_embedding)
    self.query_mapping = M.Linear(key_embedding, key_embedding)
    self.norm = M.BatchNorm1d(key_embedding)
Example No. 7
def __init__(self, gate_channel, reduction_ratio=16, num_layers=1):
    super(ChannelGate, self).__init__()

    # channel widths for the gating MLP: gate_channel -> reduced -> gate_channel
    gate_channels = [gate_channel]
    gate_channels += [gate_channel // reduction_ratio] * num_layers
    gate_channels += [gate_channel]

    # Flatten is a custom module defined or imported elsewhere in the original source
    self.gate_c = M.Sequential(
        Flatten(),
        M.Linear(gate_channels[0], gate_channels[1]),
        M.BatchNorm1d(gate_channels[1]),
        M.ReLU(),
        M.Linear(gate_channels[-2], gate_channels[-1]),
    )
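One plausible way to apply this channel gate, sketched under the assumption of a BAM-style squeeze-and-gate; the original forward, and how the result is combined with any spatial gate, are not shown above:

import megengine.functional as F

def forward(self, x):
    # squeeze each channel to a single value, run the gating MLP, then
    # broadcast the per-channel gate back over the (N, C, H, W) feature map
    pooled = F.adaptive_avg_pool2d(x, 1)      # (N, C, 1, 1)
    gate = self.gate_c(pooled)                # (N, C) after Flatten + MLP
    gate = F.expand_dims(gate, 2)             # (N, C, 1)
    gate = F.expand_dims(gate, 3)             # (N, C, 1, 1)
    return x * F.sigmoid(gate)                # one possible way to apply the gate
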
Example No. 8
def __init__(self, input_size: int, output_size: int):
    super(LinearBlock, self).__init__()
    self.relu = M.ReLU()
    self.normalize = M.BatchNorm1d(
        output_size,
        affine=True,
        momentum=0.999,
        eps=1e-3,
        track_running_stats=False,
    )
    self.linear = M.Linear(input_size, output_size)
    # fc_init_ is a weight-initialization helper defined elsewhere in the original source
    fc_init_(self.linear)
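The block's forward is not included; a reasonable guess, given the attributes above, is Linear followed by BatchNorm1d and ReLU:

def forward(self, x):
    # presumed order: project, normalize, then activate
    return self.relu(self.normalize(self.linear(x)))
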
Example No. 9
def __init__(self, feature_dim, channel, size=7):
    """Initialization.

    Args:
        feature_dim (int): dimension of the output embedding
        channel (int): channel number of the input feature map
        size (int, optional): spatial size of the input feature map. Defaults to 7.
    """
    super().__init__()
    self.size = size
    self.bn1 = M.BatchNorm2d(channel)
    self.dropout = M.Dropout(drop_prob=0.1)
    self.fc = M.Linear(channel, feature_dim)
    self.bn2 = M.BatchNorm1d(feature_dim, affine=False)
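A hedged sketch of how this embedding head might be applied to a (N, channel, size, size) feature map; the original forward, including the exact placement of dropout, is not shown:

import megengine.functional as F

def forward(self, x):
    x = self.bn1(x)                      # normalize the input feature map
    x = F.avg_pool2d(x, self.size)       # global average pool -> (N, channel, 1, 1)
    x = F.flatten(x, 1)                  # -> (N, channel)
    x = self.dropout(x)
    x = self.fc(x)                       # -> (N, feature_dim)
    return self.bn2(x)                   # feature-wise BN without affine parameters
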
Example No. 10
def __init__(self):
    super().__init__()
    self.data = np.random.random((10, 100)).astype(np.float32)
    self.linear = M.Linear(100, 200, bias=False)
    self.bn = M.BatchNorm1d(200)
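For context, a complete, runnable version of this last example (the class name and the forward method are invented here, not taken from the original source), showing the module applied to its stored test data:

import numpy as np
import megengine as mge
import megengine.module as M

class LinearBNNet(M.Module):   # hypothetical name for the module above
    def __init__(self):
        super().__init__()
        self.data = np.random.random((10, 100)).astype(np.float32)
        self.linear = M.Linear(100, 200, bias=False)
        self.bn = M.BatchNorm1d(200)

    def forward(self, x):
        # BatchNorm1d normalizes each of the 200 output features across the batch
        return self.bn(self.linear(x))

net = LinearBNNet()
out = net(mge.Tensor(net.data))
print(out.shape)   # (10, 200)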