Example No. 1
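# Note: LPRIter, load_module, ctc_decoder, CtcMetrics, index_2_char and
# top_k_match are assumed to be provided by the surrounding project
# (e.g. the MXNet CTC example code); they are not defined in this snippet.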
def main():
    batch_size = 1
    seq_len = 24
    assert batch_size == 1

    test = LPRIter(
        path_imgrec='/opt/data/plate/color_rec/blue_vertices.rec',
        path_imglist='/opt/data/plate/color_rec/vpr_blue_vertices.txt',
        label_width=8,
        # mean_img            = '/opt/data/plate/rec/mean_plate.bin',
        data_shape=(3, 94, 24),
        batch_size=batch_size,
        augment=False)

    mod = load_module(
        '/opt/models/mxnet/plate/plate_lprnet/deploy_lprnet_concat', 500,
        test.provide_data)

    batchnum = 0
    hit = 0.
    total = 0.

    test_error_list = []

    # This file is opened for writing but never written to in this example.
    fsequence = open('/opt/incubator-mxnet/example/ctc/sequence.txt', 'w')

    while True:
        try:
            batch = test.next()
            input_data = batch.data[0].asnumpy()
            batchnum += 1
            mod.forward(batch)
            output = mod.get_outputs()[0].asnumpy()
            topk = ctc_decoder.decode(output)
            label = CtcMetrics._remove_blank(batch.label[0].asnumpy()[0])
            is_match = top_k_match(label, topk)
            if is_match:
                hit += 1
            else:
                decode_l = index_2_char(label)
                decode_p = index_2_char(topk[0][0])
                print_str = decode_l + '\t' + decode_p
                print("label:%s\tpred:%s" % (decode_l, decode_p))
                test_error_list.append(print_str)
            total += 1.0

            print(total)
        except StopIteration:
            break
    ferror = open('/opt/incubator-mxnet/example/ctc/test_topk_error_list.txt',
                  'w')
    for file_str in test_error_list:
        ferror.write(file_str + '\n')
    ferror.close()
    print(hit / total, hit, total)
    fsequence.close()
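The helpers index_2_char and top_k_match are not shown above. A minimal sketch of what they might look like, assuming a 1-based character alphabet (blank index 0) and that ctc_decoder.decode returns a list of (index_sequence, score) candidates:

PLATE_CHARS = '0123456789ABCDEFGHJKLMNPQRSTUVWXYZ'  # assumed alphabet, for illustration only


def index_2_char(indices):
    # Map 1-based class indices back to a readable string.
    return ''.join(PLATE_CHARS[int(i) - 1] for i in indices)


def top_k_match(label, topk):
    # True if the ground-truth index sequence appears among the top-k candidates.
    target = [int(i) for i in label]
    return any([int(i) for i in seq] == target for seq, _ in topk)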
def infer(self, inputs, labels):
    # Run the network and decode each example with beam search, using only
    # the first x_lens[i] frames of its probability matrix.
    x, y, x_lens, y_lens = self.collate(inputs, labels)
    out, probs = self.forward_imp(x, softmax=True)
    probs = probs.cpu().detach().numpy()
    x_lens = x_lens.cpu().detach().numpy()
    res = [
        decode(p[:l], beam_size=self.beam_size, blank=self.blank)[0]
        for p, l in zip(probs, x_lens)
    ]
    return res
def eval_model(self, inputs, labels, beam_size=1):
    # Compute the CTC loss and the decoded predictions in one pass.
    self.eval()
    x, y, x_lens, y_lens = self.collate(inputs, labels)
    out, probs = self.forward_imp(x, softmax=True)
    loss = self.ctc_loss(out, y, x_lens, y_lens)
    # Move probabilities and lengths to the CPU for decoding.
    probs = probs.cpu().detach().numpy()
    x_lens = x_lens.cpu().detach().numpy()
    # Greedy decoding for beam_size == 1, otherwise CTC beam search.
    if beam_size == 1:
        pred = [
            max_decode(p[:l], blank=self.blank)
            for p, l in zip(probs, x_lens)
        ]
    else:
        pred = [
            decode(p[:l], beam_size=beam_size, blank=self.blank)[0]
            for p, l in zip(probs, x_lens)
        ]
    return loss.item(), pred
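eval_model falls back to greedy decoding when beam_size is 1. max_decode is not defined in this snippet; a minimal sketch of greedy CTC decoding (argmax per timestep, collapse repeats, drop blanks), assuming probs has shape (time, num_classes):

import numpy as np


def max_decode(probs, blank=0):
    # Greedy CTC decoding: take the argmax at every timestep, collapse
    # consecutive repeats, then remove the blank token.
    best_path = np.argmax(probs, axis=1)
    collapsed = [int(p) for i, p in enumerate(best_path)
                 if i == 0 or p != best_path[i - 1]]
    return [p for p in collapsed if p != blank]

The beam-search decode used in the beam_size > 1 branch additionally tracks prefix probabilities and is not sketched here.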