Example #1
0
def create_ntee_from_compoents(dir_path):
    """Assemble an NTEE model from component files stored in ``dir_path``.

    Expects the directory to contain: ``dict.word``/``dict.entity`` vocab
    files, ``word_embeddings.npy``/``entity_embeddings.npy`` embedding
    matrices, and ``W.npy``/``b.npy`` for the model's linear layer.

    Returns the constructed NTEE model with the saved linear weights
    loaded.
    """
    word_dict_path = dir_path + '/dict.word'
    word_embs_path = dir_path + '/word_embeddings.npy'
    entity_dict_path = dir_path + '/dict.entity'
    entity_embs_path = dir_path + '/entity_embeddings.npy'
    W_path = dir_path + '/W.npy'
    b_path = dir_path + '/b.npy'

    print('load voca and embeedings')
    word_voca, word_embs = utils.load_voca_embs(word_dict_path, word_embs_path)
    entity_voca, entity_embs = utils.load_voca_embs(entity_dict_path,
                                                    entity_embs_path)
    config = {
        'word_embeddings': word_embs,
        'entity_embeddings': entity_embs,
        'word_voca': word_voca,
        'entity_voca': entity_voca,
        'emb_dims': word_embs.shape[1]
    }
    print("word_embs.shape:", word_embs.shape, "entity_embs.shape:",
          entity_embs.shape)

    # create model
    print('create model')
    model = NTEE(config)

    W = np.load(W_path)
    b = np.load(b_path)
    # Fix: nn.Parameter (capital P). ``nn.parameter`` is the
    # torch.nn.parameter *module* and is not callable, so the original
    # raised TypeError at runtime. W is stored transposed on disk,
    # hence the .t().
    model.linear.weight = nn.Parameter(torch.FloatTensor(W).t())
    model.linear.bias = nn.Parameter(torch.FloatTensor(b))

    return model
Example #2
0
 def __init__(self, hidden_size):
     """Attention module: a (hidden x hidden) weight, a bias and a query
     vector, all trainable.

     hidden_size: dimensionality of the hidden states being attended over.
     """
     super(Attention, self).__init__()
     # Fix: nn.Parameter (capital P) — ``nn.parameter`` is a module, not
     # the Parameter class, so the original raised TypeError.
     self.weight = nn.Parameter(torch.Tensor(hidden_size, hidden_size))
     self.weight.data.normal_(mean=0.0, std=0.05)
     self.bias = nn.Parameter(torch.Tensor(hidden_size))
     b = np.zeros(hidden_size, dtype=np.float32)
     self.bias.data.copy_(torch.from_numpy(b))
     self.query = nn.Parameter(torch.Tensor(hidden_size))
     self.query.data.normal_(mean=0.0, std=0.05)
Example #3
0
    def __init__(self, channels: int, eps: float=1e-5, affine: bool=True,
                 momentum: float=0.1, ema: bool=True):
        """Normalization layer over ``channels`` features.

        channels: number of features being normalized.
        eps: small constant added to the variance for numerical stability.
        affine: when True, learn a per-channel scale and shift.
        momentum: update rate for the running statistics.
        ema: when True, keep exponential-moving-average mean/var buffers.
        """
        # Fix: super() must be called — ``super.__init__()`` invoked the
        # builtin type's initializer and raised TypeError.
        super().__init__()
        self.channels = channels
        self.eps = eps
        # Fix: ``affine`` was never stored, so the ``if self.affine:``
        # check below raised AttributeError.
        self.affine = affine
        self.momentum = momentum
        self.ema = ema
        if self.ema:
            self.register_buffer('ema_mean', torch.zeros(self.channels))
            self.register_buffer('ema_var', torch.ones(self.channels))
        if self.affine:
            # Fix: nn.Parameter (capital P); nn.parameter is a module.
            self.scale = nn.Parameter(torch.ones(self.channels))
            self.shift = nn.Parameter(torch.zeros(self.channels))
Example #4
0
    def __init__(self, args, label_number=10):
        """Noise-correction loss wrapper.

        args: run configuration object (stored as-is).
        label_number: number of class labels; a (label_number, 2)
            trainable table is kept per label.
        """
        super(NoiseCorrectionLoss, self).__init__()

        # Fix: nn.Parameter (capital P) — ``nn.parameter`` is the
        # torch.nn.parameter module and is not callable.
        self.big_table = nn.Parameter(torch.zeros((label_number, 2)))
        self.args = args
        self.classification_loss = ClassificationNoiseCorrectionLoss()
Example #5
0
 def __init__(self, d_input, d_attn):
     '''
     d_input: the video feature size
     d_attn: the attn size used in the attention calculation
     '''
     super(StepTopDownAttention, self).__init__()
     # Fix: nn.Parameter (capital P) — ``nn.parameter`` is a module,
     # not the Parameter class, so the original raised TypeError.
     self.w_a = nn.Parameter(torch.Tensor(d_attn))
     self.f_a = GatedTanhLayer(d_input, d_attn)
Example #6
0
File: CoraData.py  Project: cenyc/GCN5
 def __init__(self, input_dim, out_dim, use_bias=True):
     """Graph convolution layer: a (input_dim x out_dim) weight with an
     optional bias.

     input_dim: size of each input node feature vector.
     out_dim: size of each output node feature vector.
     use_bias: when True, add a learnable per-output bias.
     """
     super(GraphConvolution, self).__init__()
     self.input_dim = input_dim
     self.output_dim = out_dim
     self.use_bias = use_bias
     self.weight = nn.Parameter(torch.Tensor(input_dim, out_dim))
     if self.use_bias:
         # Fix: nn.Parameter (capital P), consistent with the weight
         # above — ``nn.parameter`` is a module and is not callable.
         self.bias = nn.Parameter(torch.Tensor(out_dim))
     else:
         self.register_parameter('bias', None)
     self.reset_parameters()
Example #7
0
    def __init__(self, hidden_size, method='dot'):
        """Global (Luong-style) attention.

        hidden_size: dimensionality of the hidden states being scored.
        method: scoring function, one of 'dot', 'general' or 'concat'.

        Raises ValueError for any other ``method``.
        """
        super(GlobalAttention, self).__init__()

        self.hidden_size = hidden_size
        self.method = method
        if self.method not in ['dot', 'general', 'concat']:
            raise ValueError("please input correct method for attention score")
        if self.method == 'general':
            self.w_general = nn.Linear(hidden_size, hidden_size)
        elif self.method == 'concat':
            self.w_concat = nn.Linear(hidden_size * 2, hidden_size)
            # Fix: nn.Parameter (capital P) — ``nn.parameter`` is the
            # torch.nn.parameter module and is not callable.
            self.v_concat = nn.Parameter(torch.FloatTensor(hidden_size))
 def __init__(self, options, weights = None):
     """Additive attention over encoder states.

     options: dict with 'n_encoder', 'n_decoder', 'n_att' and 'fixed'.
     weights: optional sequence of three pre-trained weight tensors for
         the encoder, decoder and attention linear layers.
     """
     super(Attention, self).__init__()
     self.n_encoder = options['n_encoder']
     self.n_decoder = options['n_decoder']
     self.n_att = options['n_att']
     self.layer_en = nn.Linear(self.n_encoder, self.n_att)
     self.layer_de = nn.Linear(self.n_decoder, self.n_att, bias = False)
     self.layer_att = nn.Linear(self.n_att, 1)
     if weights is not None:
         # Fix: the nn.Linear parameter is ``weight`` — the original
         # assigned a nonexistent ``weights`` attribute, so the
         # pre-trained values were silently never loaded. Also
         # nn.Parameter (capital P); nn.parameter is a module.
         self.layer_en.weight = nn.Parameter(weights[0])
         self.layer_de.weight = nn.Parameter(weights[1])
         self.layer_att.weight = nn.Parameter(weights[2])

     self.fixed = options['fixed']
     if self.fixed:
         # Freeze all three projections when requested.
         self.layer_en.weight.requires_grad = False
         self.layer_de.weight.requires_grad = False
         self.layer_att.weight.requires_grad = False
Example #9
0
    def __init__(self,
                 capsule_num,
                 route_nodes_num,
                 input_channels,
                 output_channels,
                 kernel_size=None,
                 stride=None,
                 iterations=3):
        """Capsule layer (primary or digit caps).

        capsule_num: number of capsules in this layer.
        route_nodes_num: number of routing nodes; -1 selects the
            primary-caps (convolutional) variant.
        input_channels / output_channels: capsule vector sizes.
        kernel_size, stride: conv settings, used only for primary caps.
        iterations: number of dynamic-routing iterations.
        """
        super(CapsLayer, self).__init__()

        self.iterations = iterations
        self.output_channels = output_channels
        self.input_channels = input_channels
        self.route_nodes_num = route_nodes_num
        self.capsule_num = capsule_num

        if self.route_nodes_num != -1:
            # this is digit caps, assign weights for that layer
            # Fix: nn.Parameter (capital P) — ``nn.parameter`` is the
            # torch.nn.parameter module and is not callable.
            self.weights = nn.Parameter(
                torch.randn(self.capsule_num, self.route_nodes_num,
                            self.input_channels, self.output_channels))

        else:
            # this is primary caps
            convLayers = []

            for i in range(self.capsule_num):
                convLayers.append(
                    nn.Conv2d(self.input_channels,
                              self.output_channels,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=0))
            # Fix: a plain Python list hides the conv parameters from
            # nn.Module (not trained, not moved by .to()/.cuda()).
            # nn.ModuleList registers them and still supports indexing
            # and iteration like a list.
            self.capsules = nn.ModuleList(convLayers)
Example #10
0
 def __init__(self, lc, lp, lq):
     """ST-ResNet fusion weights: one learnable (depth, 20, 20) tensor
     per input component (closeness, period, trend).

     lc, lp, lq: depths of the three components.
     """
     super(STResNet, self).__init__()
     self.wc = nn.Parameter(torch.ones((lc, 20, 20)))
     # Fix: nn.Parameter (capital P), consistent with wc/wq —
     # ``nn.parameter`` is a module and is not callable.
     self.wp = nn.Parameter(torch.ones((lp, 20, 20)))
     self.wq = nn.Parameter(torch.ones((lq, 20, 20)))
Example #11
0
File: net.py  Project: LOOKCC/SSD
 def __init__(self, in_features, scale):
     """L2 normalization layer with a per-channel learnable rescaling.

     in_features: number of channels to normalize.
     scale: initial value passed to reset_parameter for the weights.
     """
     super(L2Norm, self).__init__()
     # Fix: nn.Parameter (capital P) — ``nn.parameter`` is a module,
     # not the Parameter class, so the original raised TypeError.
     self.weight = nn.Parameter(torch.Tensor(in_features))
     self.reset_parameter(scale)
 def __init__(self, train_beta=False):
     """Swish activation.

     train_beta: when True, beta is a learnable parameter initialized
         to 1.0; otherwise it is the fixed constant 1.0.
     """
     super(Swish, self).__init__()
     if train_beta:
         # Fix: nn.Parameter (capital P) — ``nn.parameter`` is the
         # torch.nn.parameter module and is not callable.
         self.weight = nn.Parameter(torch.Tensor([1.]))
     else:
         self.weight = 1.0
Example #13
0
    def __init__(self, input_size, hidden_size):
        """Hand-rolled LSTM cell parameters: input/output/forget gates
        and the candidate ("info carried") transform, each with an
        input-to-hidden weight, a hidden-to-hidden weight and a bias.

        input_size: size of each input vector.
        hidden_size: size of the hidden state.

        NOTE(review): tensors are left uninitialized here; presumably a
        separate reset/initialization step runs elsewhere — confirm.
        """
        super().__init__()

        self.input_size_ = input_size
        self.hidden_size_ = hidden_size

        # Fix throughout: bare lowercase ``parameter(...)`` is not a
        # torch name; these must be nn.Parameter so the tensors are
        # registered as trainable parameters.

        # Gate for input
        self.wii = nn.Parameter(torch.Tensor(input_size, hidden_size))
        self.whi = nn.Parameter(torch.Tensor(hidden_size, hidden_size))
        self.bi = nn.Parameter(torch.Tensor(hidden_size))

        # gate for output
        self.wio = nn.Parameter(torch.Tensor(input_size, hidden_size))
        self.who = nn.Parameter(torch.Tensor(hidden_size, hidden_size))
        self.bo = nn.Parameter(torch.Tensor(hidden_size))

        # gate for forget
        self.wif = nn.Parameter(torch.Tensor(input_size, hidden_size))
        self.whf = nn.Parameter(torch.Tensor(hidden_size, hidden_size))
        self.bf = nn.Parameter(torch.Tensor(hidden_size))

        # info carried
        self.wig = nn.Parameter(torch.Tensor(input_size, hidden_size))
        self.whg = nn.Parameter(torch.Tensor(hidden_size, hidden_size))
        self.bg = nn.Parameter(torch.Tensor(hidden_size))
Example #14
0
 def __init__(self, features, eps=1e-6):
     """Layer normalization with learnable gain (a_2) and bias (b_2).

     features: number of features being normalized.
     eps: small constant for numerical stability in the variance term.
     """
     super(LayerNorm, self).__init__()
     # Fix: nn.Parameter (capital P) — ``nn.parameter`` is the
     # torch.nn.parameter module and is not callable.
     self.a_2 = nn.Parameter(torch.ones(features))
     self.b_2 = nn.Parameter(torch.zeros(features))
     self.eps = eps