Beispiel #1
0
def weight_variable(fan_in):
    """Build a truncated-normal weight initializer scaled by fan-in.

    The standard deviation is sqrt(1 / fan_in), so larger input widths
    get proportionally smaller initial weights.
    """
    variance = 1.0 / fan_in
    return TruncatedNormal(variance ** 0.5)
Beispiel #2
0
 # Test-case entry: BertPretrainingLoss with a minimal (batch_size=1) config.
 # 'desc_inputs' mixes plain shape lists with concrete int32 Tensors;
 # 'desc_bprop' describes the gradient sensitivity shape for backprop.
 ('BertPretrainingLoss', {
     'block':
     BertPretrainingLoss(config=BertConfig(batch_size=1)),
     'desc_inputs': [[32000], [20, 2],
                     Tensor(np.array([1]).astype(np.int32)), [20],
                     Tensor(np.array([20]).astype(np.int32))],
     'desc_bprop': [[1]],
     'num_output':
     1
 }),
 # Test-case entry: a 768 -> 3072 Dense layer with GELU activation and
 # truncated-normal (stddev 0.02) weight init; input [3, 768] -> output [3, 3072].
 ('Dense_1', {
     'block':
     nn.Dense(in_channels=768,
              out_channels=3072,
              activation='gelu',
              weight_init=TruncatedNormal(0.02)),
     'desc_inputs': [[3, 768]],
     'desc_bprop': [[3, 3072]]
 }),
 # Test-case entry: same Dense layer as Dense_1 but wrapped in set_train(),
 # i.e. exercised in training mode rather than the default mode.
 ('Dense_2', {
     'block':
     set_train(
         nn.Dense(
             in_channels=768,
             out_channels=3072,
             activation='gelu',
             weight_init=TruncatedNormal(0.02),
         )),
     'desc_inputs': [[3, 768]],
     'desc_bprop': [[3, 3072]]
 }),
Beispiel #3
0
 def __init__(self, in_channels, out_channels):
     """Pointwise (1x1) output convolution from in_channels to out_channels."""
     super(OutConv, self).__init__()
     # Weights start from a truncated normal with stddev 0.06; bias is enabled.
     self.conv = nn.Conv2d(
         in_channels,
         out_channels,
         kernel_size=1,
         has_bias=True,
         weight_init=TruncatedNormal(0.06),
     )
Beispiel #4
0
def weight_variable(sigma):
    """Return a TruncatedNormal weight initializer with stddev ``sigma``.

    Callers in this codebase typically pass 0.02.
    """
    return TruncatedNormal(sigma)
Beispiel #5
0
def _conv(in_channels, out_channels, kernel_size=3, stride=1, padding=0, pad_mode='same', has_bias=False):
    """Build a Conv2d layer with truncated-normal (stddev 0.02) weight init.

    All geometry parameters (kernel_size, stride, padding, pad_mode) and the
    bias flag are forwarded unchanged to ``nn.Conv2d``.
    """
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        pad_mode=pad_mode,
        weight_init=TruncatedNormal(0.02),
        has_bias=has_bias,
    )
Beispiel #6
0
 def __init__(self, n_features, n_classes):
     """Single dense layer mapping n_features inputs to n_classes logits."""
     super(LogisticRegression, self).__init__()
     # NOTE(review): the two positional TruncatedNormal(0.02) arguments are
     # presumably weight_init and bias_init — confirm against the nn.Dense
     # signature used by this project before changing them to keywords.
     self.model = nn.Dense(n_features, n_classes, TruncatedNormal(0.02),
                           TruncatedNormal(0.02))