Example 1
    def forward(self, q, a_batch):
        # bs, seq, hid*2
        q = self.lstm_q(q)
        # bs, 1, seq, hid*2
        q = autograd.reshape(q, (q.shape[0], 1, q.shape[1], q.shape[2]))
        # bs, 1, 1, hid*2
        q = self.q_pool(q)
        # bs, hid*2
        q = autograd.reshape(q, (q.shape[0], q.shape[3]))

        # 2bs, seq, hid*2
        a_batch = self.lstm_a(a_batch)
        # 2bs, 1, seq, hid*2
        a_batch = autograd.reshape(
            a_batch, (a_batch.shape[0], 1, a_batch.shape[1], a_batch.shape[2]))
        # 2bs, 1, 1, hid*2
        a_batch = self.a_pool(a_batch)
        # 2bs, hid*2
        a_batch = autograd.reshape(a_batch,
                                   (a_batch.shape[0], a_batch.shape[3]))

        # 2*(bs, hid*2)
        a_pos, a_neg = autograd.split(a_batch, 0, [q.shape[0], q.shape[0]])

        sim_pos = autograd.cossim(q, a_pos)
        sim_neg = autograd.cossim(q, a_neg)
        return sim_pos, sim_neg
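autograd.cossim scores each question/answer pair by the cosine of the angle between their embeddings. A minimal NumPy sketch of that computation (illustrative only, not SINGA's implementation):

    import numpy as np

    def cossim(a, b):
        # row-wise cosine similarity for two (batch, features) matrices
        num = (a * b).sum(axis=1)
        den = np.linalg.norm(a, axis=1) * np.linalg.norm(b, axis=1)
        return num / den

    q = np.random.rand(4, 8)   # (bs, hid*2)
    a = np.random.rand(4, 8)
    print(cossim(q, a).shape)  # (4,)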
Example 2
    def forward(self, q, a_batch):
        q = autograd.reshape(q, (q.shape[0], -1))  # bs, seq_q*data_s
        a_batch = autograd.reshape(a_batch,
                                   (a_batch.shape[0], -1))  # 2bs, seq_a*data_s

        q = self.linear_q(q)  # bs, hid_s
        a_batch = self.linear_a(a_batch)  # 2bs, hid_s

        a_pos, a_neg = autograd.split(a_batch, 0,
                                      [q.shape[0], q.shape[0]])  # 2*(bs, hid)

        sim_pos = autograd.cossim(q, a_pos)
        sim_neg = autograd.cossim(q, a_neg)
        return sim_pos, sim_neg
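Both examples return a (sim_pos, sim_neg) pair. Such a pair is typically fed into a margin ranking (hinge) loss so the positive answer is pushed to score higher than the negative one; a hedged NumPy sketch, with the margin value and function name chosen for illustration:

    import numpy as np

    def margin_ranking_loss(sim_pos, sim_neg, margin=0.2):
        # hinge: penalize negatives that come within margin of the positive
        return np.maximum(0.0, margin - sim_pos + sim_neg).mean()

    sim_pos = np.array([0.9, 0.7, 0.6])
    sim_neg = np.array([0.2, 0.6, 0.7])
    print(margin_ranking_loss(sim_pos, sim_neg))  # ~0.133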
Example 3
 def forward(self, x):
     y = self.lstm(x)                           # run the LSTM over the input sequence
     y = autograd.reshape(y, (y.shape[0], -1))  # flatten all timesteps to one vector per sample
     y = self.l1(y)
     y = autograd.relu(y)
     y = self.l2(y)
     return y
Example 4
    def predict(self, queries: List[str]):
        print("Get queries")

        res = []
        queries = [self._preprocess(ele) for ele in queries]

        for input_ids in queries:
            x = tensor.Tensor(device=self.dev, data=input_ids)
            out = []
            for i in range(self.length):
                y = self._model.forward(x)
                # keep only the logits of the last position
                y = autograd.reshape(y, y.shape[-2:])[-1, :]
                y = tensor.softmax(y)
                y = tensor.to_numpy(y)[0]
                y = np.argsort(y)[-1]  # greedy pick of the most probable token
                out.append(y)
                # feed the chosen token back as the next input step
                y = np.array([y]).reshape([1, 1, -1]).astype(np.float32)
                y = tensor.Tensor(device=self.dev, data=y)
                x = tensor.concatenate([x, y], 2)
            result = self._postprocess(out)
            res.append(result)
        return res
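The inner loop is plain greedy decoding: at every step the model is run on the context so far and the most probable next token is appended. Note that np.argsort(y)[-1] is just a roundabout np.argmax(y):

    import numpy as np

    probs = np.array([0.1, 0.05, 0.6, 0.25])
    assert np.argsort(probs)[-1] == np.argmax(probs) == 2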
Example 5
 def forward(self, x):
     y = self.conv1(x)
     y = self.bn1(y)
     # flatten the feature maps to (bs, -1) for the fully connected layers
     y = autograd.reshape(y, (y.shape[0], -1))
     y = self.doublelinear1(y)
     return y
Example 6
 def forward(self, x):
     y = self.lstm(x)
     if self.return_sequences:
         # flatten the per-step outputs to (bs, seq_length * hidden_size)
         y = autograd.reshape(y, (-1, self.seq_length * self.hidden_size))
     return y
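The -1 in the reshape lets the batch dimension be inferred while the per-step outputs are collapsed into a single feature vector per sample. A NumPy analogue of the same flattening:

    import numpy as np

    seq_length, hidden_size = 4, 3
    y = np.zeros((2, seq_length, hidden_size))      # (bs, seq, hidden)
    flat = y.reshape(-1, seq_length * hidden_size)  # -1 infers bs
    print(flat.shape)                               # (2, 12)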
Example 7
 def forward(self, x, seq_lengths):
     y = self.lstm(x, seq_lengths=seq_lengths)  # variable-length sequences, one length per sample
     y = autograd.reshape(y, (y.shape[0], -1))  # flatten to (bs, -1)
     y = self.l1(y)
     return y
Example 8
    logging.info("model compling...")
    dev = device.get_default_device()
    x = tensor.Tensor(device=dev, data=input_ids)
    model = MyModel(onnx_model)

    # verify the test
    # from utils import load_dataset
    # sg_ir = sonnx.prepare(onnx_model) # run without graph
    # inputs, ref_outputs = load_dataset(
    #     os.path.join('/tmp', 'GPT-2-LM-HEAD', 'test_data_set_0'))
    # outputs = sg_ir.run(inputs)
    # for ref_o, o in zip(ref_outputs, outputs):
    #     np.testing.assert_almost_equal(ref_o, o, 4)

    logging.info("model running...")
    output = []

    for i in range(length):
        logging.info("word {} generating...".format(i))
        y = model.forward(x)
        # keep only the logits of the last position
        y = autograd.reshape(y, y.shape[-2:])[-1, :]
        y = tensor.softmax(y)
        y = tensor.to_numpy(y)[0]
        y = np.argsort(y)[-1]  # greedy pick of the most probable token
        output.append(y)
        # feed the chosen token back as the next input step
        y = np.array([y]).reshape([1, 1, -1]).astype(np.float32)
        y = tensor.Tensor(device=dev, data=y)
        x = tensor.concatenate([x, y], 2)

    text = postprocess(output)
    print(text)
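Each chosen token id is reshaped to (1, 1, 1) and concatenated to the input along axis 2, so the next forward pass sees the extended context. A NumPy analogue of that feedback step (the shapes are illustrative):

    import numpy as np

    x = np.zeros((1, 1, 5), dtype=np.float32)  # running token-id context
    y = np.array([42]).reshape([1, 1, -1]).astype(np.float32)
    x = np.concatenate([x, y], axis=2)         # context grows by one token
    print(x.shape)                             # (1, 1, 6)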
Example 9
 def loss(self, out, ty):
     ty = autograd.reshape(ty, (-1, 1))  # labels as a column vector, one class index per row
     return autograd.softmax_cross_entropy(out, ty)
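The reshape turns a flat vector of class indices into the (batch, 1) column layout that softmax_cross_entropy is being given here. A NumPy analogue:

    import numpy as np

    ty = np.array([2, 0, 1])  # one class index per sample
    ty = ty.reshape(-1, 1)    # column vector
    print(ty.shape)           # (3, 1)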
Example 10
 def forward(self, inputs):
     # stateful step: reuse the hidden and cell state from the previous call
     x, self.hx, self.cx = self.rnn(inputs, (self.hx, self.cx))
     x = autograd.cat(x)  # join the per-step outputs
     x = autograd.reshape(x, (-1, self.hidden_size))
     return self.dense(x)
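autograd.cat joins the per-step RNN outputs before the reshape flattens them to one row per hidden vector, so the dense layer sees every timestep of every sample as a separate row. A NumPy sketch of the same shape manipulation (step count and sizes made up):

    import numpy as np

    hidden_size = 4
    steps = [np.zeros((2, hidden_size)) for _ in range(3)]  # 3 steps, batch 2
    x = np.concatenate(steps)       # analogous to autograd.cat(x)
    x = x.reshape(-1, hidden_size)
    print(x.shape)                  # (6, 4)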