Example #1
def main():
    device = set_device(FLAGS.device)
    fluid.enable_dygraph(device) if FLAGS.dynamic else None

    transform = Compose([GroupScale(), GroupCenterCrop(), NormalizeImage()])
    dataset = KineticsDataset(
        pickle_file=FLAGS.infer_file,
        label_list=FLAGS.label_list,
        mode='test',
        transform=transform)
    labels = dataset.label_list

    model = tsm_resnet50(
        num_classes=len(labels), pretrained=FLAGS.weights is None)

    inputs = [Input([None, 8, 3, 224, 224], 'float32', name='image')]

    model.prepare(inputs=inputs, device=FLAGS.device)

    if FLAGS.weights is not None:
        model.load(FLAGS.weights, reset_optimizer=True)

    imgs, label = dataset[0]
    pred = model.test_batch([imgs[np.newaxis, :]])
    pred = labels[np.argmax(pred)]
    logger.info("Sample {} predict label: {}, ground truth label: {}" \
                .format(FLAGS.infer_file, pred, labels[int(label)]))
Example #2
File: test_text.py Project: neuzxy/Paddle
    def make_inputs(self):
        inputs = [
            Input([None, None, self.inputs[0].shape[-1]],
                  "float32",
                  name="dec_input"),
            Input([None, None, self.inputs[0].shape[-1]],
                  "float32",
                  name="enc_output"),
            Input([None, self.inputs[-1].shape[1], None, None],
                  "float32",
                  name="self_attn_bias"),
            Input([None, self.inputs[-1].shape[1], None, None],
                  "float32",
                  name="cross_attn_bias"),
        ]
        return inputs
Example #3
File: test_text.py Project: neuzxy/Paddle
    def make_inputs(self):
        inputs = [
            Input([None, None, self.inputs[-1].shape[-1]],
                  "float32",
                  name="input"),
        ]
        return inputs
Example #4
    def test_test_batch(self, dynamic=True):
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)

        def get_expect():
            fluid.enable_dygraph(fluid.CPUPlace())
            self.set_seed()
            m = MyModel()
            m.eval()
            output = m(to_variable(data))
            fluid.disable_dygraph()
            return output.numpy()

        ref = get_expect()
        for dynamic in [True, False]:
            device = set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            self.set_seed()
            model = MyModel()
            inputs = [Input([None, dim], 'float32', name='x')]
            model.prepare(inputs=inputs, device=device)
            out, = model.test_batch([data])

            np.testing.assert_allclose(out, ref)
            fluid.disable_dygraph() if dynamic else None
Example #5
    def test_export_deploy_model(self):
        model = LeNet()
        inputs = [Input([-1, 1, 28, 28], 'float32', name='image')]
        model.prepare(inputs=inputs)
        save_dir = tempfile.mkdtemp()
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        tensor_img = np.array(np.random.random((1, 1, 28, 28)),
                              dtype=np.float32)
        ori_results = model.test_batch(tensor_img)

        model.save_inference_model(save_dir)

        place = fluid.CPUPlace() if not fluid.is_compiled_with_cuda() \
            else fluid.CUDAPlace(0)
        exe = fluid.Executor(place)
        [inference_program, feed_target_names,
         fetch_targets] = (fluid.io.load_inference_model(dirname=save_dir,
                                                         executor=exe))

        results = exe.run(inference_program,
                          feed={feed_target_names[0]: tensor_img},
                          fetch_list=fetch_targets)

        np.testing.assert_allclose(results, ori_results)
        shutil.rmtree(save_dir)
Example #6
File: train.py Project: swtkiwi/hapi
def do_train(args):
    device = set_device("gpu" if args.use_gpu else "cpu")
    fluid.enable_dygraph(device) if args.eager_run else None

    if args.enable_ce:
        fluid.default_main_program().random_seed = 102
        fluid.default_startup_program().random_seed = 102

    # define model
    inputs = [
        Input([None, None], "int64", name="src_word"),
        Input([None], "int64", name="src_length"),
        Input([None, None], "int64", name="trg_word"),
    ]
    labels = [
        Input([None], "int64", name="trg_length"),
        Input([None, None, 1], "int64", name="label"),
    ]

    # define dataloader
    train_loader, eval_loader = create_data_loader(args, device)

    model_maker = get_model_cls(
        AttentionModel) if args.attention else get_model_cls(BaseModel)
    model = model_maker(args.src_vocab_size, args.tar_vocab_size,
                        args.hidden_size, args.hidden_size, args.num_layers,
                        args.dropout)
    grad_clip = fluid.clip.GradientClipByGlobalNorm(
        clip_norm=args.max_grad_norm)
    optimizer = fluid.optimizer.Adam(learning_rate=args.learning_rate,
                                     parameter_list=model.parameters(),
                                     grad_clip=grad_clip)

    ppl_metric = PPL(reset_freq=100)  # ppl for every 100 batches
    model.prepare(optimizer,
                  CrossEntropyCriterion(),
                  ppl_metric,
                  inputs=inputs,
                  labels=labels,
                  device=device)
    model.fit(train_data=train_loader,
              eval_data=eval_loader,
              epochs=args.max_epoch,
              eval_freq=1,
              save_freq=1,
              save_dir=args.model_path,
              callbacks=[TrainCallback(ppl_metric, args.log_freq)])
Example #7
    def test_lenet(self):
        lenet = models.__dict__['LeNet']()

        inputs = [Input([None, 1, 28, 28], 'float32', name='x')]
        lenet.prepare(inputs=inputs)

        x = np.array(np.random.random((2, 1, 28, 28)), dtype=np.float32)
        lenet.test_batch(x)
Example #8
    def run_callback(self):
        epochs = 2
        steps = 50
        freq = 2
        eval_steps = 20

        lenet = LeNet()
        inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
        lenet.prepare(inputs=inputs)

        cbks = config_callbacks(
            model=lenet,
            batch_size=128,
            epochs=epochs,
            steps=steps,
            log_freq=freq,
            verbose=self.verbose,
            metrics=['loss', 'acc'],
            save_dir=self.save_dir)
        cbks.on_begin('train')

        logs = {'loss': 50.341673, 'acc': 0.00256}
        for epoch in range(epochs):
            cbks.on_epoch_begin(epoch)
            for step in range(steps):
                cbks.on_batch_begin('train', step, logs)
                logs['loss'] -= random.random() * 0.1
                logs['acc'] += random.random() * 0.1
                time.sleep(0.005)
                cbks.on_batch_end('train', step, logs)
            cbks.on_epoch_end(epoch, logs)

            eval_logs = {'eval_loss': 20.341673, 'eval_acc': 0.256}
            params = {
                'steps': eval_steps,
                'metrics': ['eval_loss', 'eval_acc'],
            }
            cbks.on_begin('eval', params)
            for step in range(eval_steps):
                cbks.on_batch_begin('eval', step, eval_logs)
                eval_logs['eval_loss'] -= random.random() * 0.1
                eval_logs['eval_acc'] += random.random() * 0.1
                eval_logs['batch_size'] = 2
                time.sleep(0.005)
                cbks.on_batch_end('eval', step, eval_logs)
            cbks.on_end('eval', eval_logs)

            test_logs = {}
            params = {'steps': eval_steps}
            cbks.on_begin('test', params)
            for step in range(eval_steps):
                cbks.on_batch_begin('test', step, test_logs)
                test_logs['batch_size'] = 2
                time.sleep(0.005)
                cbks.on_batch_end('test', step, test_logs)
            cbks.on_end('test', test_logs)

        cbks.on_end('train')
Example #9
File: train.py Project: wzzju/hapi
def main(args):
    place = set_device(args.device)
    fluid.enable_dygraph(place) if args.dynamic else None

    inputs = [
        Input([None, None], 'int64', name='words'),
        Input([None], 'int64', name='length'),
        Input([None, None], 'int64', name='target')
    ]

    labels = [Input([None, None], 'int64', name='labels')]

    feed_list = None if args.dynamic else [
        x.forward() for x in inputs + labels
    ]

    dataset = LacDataset(args)
    train_dataset = LacDataLoader(args, place, phase="train")

    vocab_size = dataset.vocab_size
    num_labels = dataset.num_labels
    model = SeqTagging(args, vocab_size, num_labels, mode="train")

    optim = AdamOptimizer(learning_rate=args.base_learning_rate,
                          parameter_list=model.parameters())

    model.prepare(optim,
                  LacLoss(),
                  ChunkEval(num_labels),
                  inputs=inputs,
                  labels=labels,
                  device=args.device)

    if args.init_from_checkpoint:
        model.load(args.init_from_checkpoint)

    if args.init_from_pretrain_model:
        model.load(args.init_from_pretrain_model, reset_optimizer=True)

    model.fit(train_dataset.dataloader,
              epochs=args.epoch,
              batch_size=args.batch_size,
              eval_freq=args.eval_freq,
              save_freq=args.save_freq,
              save_dir=args.save_dir)
Example #10
    def test_save_load(self):
        path = tempfile.mkdtemp()
        for dynamic in [True, False]:
            device = set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            model = MyModel()
            inputs = [Input([None, 20], 'float32', name='x')]
            labels = [Input([None, 1], 'int64', name='label')]
            optim = fluid.optimizer.SGD(learning_rate=0.001,
                                        parameter_list=model.parameters())
            model.prepare(inputs=inputs,
                          optimizer=optim,
                          loss_function=CrossEntropy(average=False),
                          labels=labels)
            model.save(path + '/test')
            model.load(path + '/test')
            shutil.rmtree(path)
            fluid.disable_dygraph() if dynamic else None
Example #11
File: eval.py Project: wzzju/hapi
def main(FLAGS):
    device = set_device("gpu" if FLAGS.use_gpu else "cpu")
    fluid.enable_dygraph(device) if FLAGS.dynamic else None
    model = Seq2SeqAttModel(encoder_size=FLAGS.encoder_size,
                            decoder_size=FLAGS.decoder_size,
                            emb_dim=FLAGS.embedding_dim,
                            num_classes=FLAGS.num_classes)

    # yapf: disable
    inputs = [
        Input([None, 1, 48, 384], "float32", name="pixel"),
        Input([None, None], "int64", name="label_in")
    ]
    labels = [
        Input([None, None], "int64", name="label_out"),
        Input([None, None], "float32", name="mask")
    ]
    # yapf: enable

    model.prepare(loss_function=WeightCrossEntropy(),
                  metrics=SeqAccuracy(),
                  inputs=inputs,
                  labels=labels,
                  device=device)
    model.load(FLAGS.init_model)

    test_dataset = data.test()
    test_collate_fn = BatchCompose(
        [data.Resize(), data.Normalize(),
         data.PadTarget()])
    test_sampler = data.BatchSampler(test_dataset,
                                     batch_size=FLAGS.batch_size,
                                     drop_last=False,
                                     shuffle=False)
    test_loader = fluid.io.DataLoader(test_dataset,
                                      batch_sampler=test_sampler,
                                      places=device,
                                      num_workers=0,
                                      return_list=True,
                                      collate_fn=test_collate_fn)

    model.evaluate(eval_data=test_loader,
                   callbacks=[LoggerCallBack(10, 2, FLAGS.batch_size)])
Example #12
    def test_train_batch(self, dynamic=True):
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)
        label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)

        def get_expect():
            fluid.enable_dygraph(fluid.CPUPlace())
            self.set_seed()
            m = MyModel()
            optim = fluid.optimizer.SGD(learning_rate=0.001,
                                        parameter_list=m.parameters())
            m.train()
            output = m(to_variable(data))
            l = to_variable(label)
            loss = fluid.layers.cross_entropy(output, l)
            avg_loss = fluid.layers.reduce_sum(loss)
            avg_loss.backward()
            optim.minimize(avg_loss)
            m.clear_gradients()
            fluid.disable_dygraph()
            return avg_loss.numpy()

        ref = get_expect()
        for dynamic in [True, False]:
            device = set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            self.set_seed()
            model = MyModel()

            optim2 = fluid.optimizer.SGD(learning_rate=0.001,
                                         parameter_list=model.parameters())

            inputs = [Input([None, dim], 'float32', name='x')]
            labels = [Input([None, 1], 'int64', name='label')]
            model.prepare(optim2,
                          loss_function=CrossEntropy(average=False),
                          inputs=inputs,
                          labels=labels,
                          device=device)
            loss, = model.train_batch([data], [label])

            np.testing.assert_allclose(loss.flatten(), ref.flatten())
            fluid.disable_dygraph() if dynamic else None
Example #13
    def test_parameters(self):
        for dynamic in [True, False]:
            device = set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            model = MyModel()
            inputs = [Input([None, 20], 'float32', name='x')]
            model.prepare(inputs=inputs)
            params = model.parameters()
            self.assertTrue(params[0].shape[0] == 20)
            self.assertTrue(params[0].shape[1] == 10)
            fluid.disable_dygraph() if dynamic else None
Example #14
def main():
    place = set_device(FLAGS.device)
    fluid.enable_dygraph(place) if FLAGS.dynamic else None

    # Generators
    g_AB = Generator()
    g_BA = Generator()
    g = GeneratorCombine(g_AB, g_BA, is_train=False)

    im_shape = [-1, 3, 256, 256]
    input_A = Input(im_shape, 'float32', 'input_A')
    input_B = Input(im_shape, 'float32', 'input_B')
    g.prepare(inputs=[input_A, input_B], device=FLAGS.device)
    g.load(FLAGS.init_model, skip_mismatch=True, reset_optimizer=True)

    if not os.path.exists(FLAGS.output):
        os.makedirs(FLAGS.output)

    test_data_A = data.TestDataA()
    test_data_B = data.TestDataB()

    for i in range(len(test_data_A)):
        data_A, A_name = test_data_A[i]
        data_B, B_name = test_data_B[i]
        data_A = np.array(data_A).astype("float32")
        data_B = np.array(data_B).astype("float32")

        fake_A, fake_B, cyc_A, cyc_B = g.test_batch([data_A, data_B])

        datas = [fake_A, fake_B, cyc_A, cyc_B, data_A, data_B]
        odatas = []
        for o in datas:
            d = np.squeeze(o[0]).transpose([1, 2, 0])
            im = ((d + 1) * 127.5).astype(np.uint8)
            odatas.append(im)
        imsave(FLAGS.output + "/fakeA_" + B_name, odatas[0])
        imsave(FLAGS.output + "/fakeB_" + A_name, odatas[1])
        imsave(FLAGS.output + "/cycA_" + A_name, odatas[2])
        imsave(FLAGS.output + "/cycB_" + B_name, odatas[3])
        imsave(FLAGS.output + "/inputA_" + A_name, odatas[4])
        imsave(FLAGS.output + "/inputB_" + B_name, odatas[5])
Example #15
    def setUpClass(cls):
        if not fluid.is_compiled_with_cuda():
            raise unittest.SkipTest(
                'module not tested when compiled for CPU only')
        cls.device = set_device('gpu')
        fluid.enable_dygraph(cls.device)

        sp_num = 1280
        cls.train_dataset = MnistDataset(mode='train', sample_num=sp_num)
        cls.val_dataset = MnistDataset(mode='test', sample_num=sp_num)
        cls.test_dataset = MnistDataset(mode='test',
                                        return_label=False,
                                        sample_num=sp_num)

        cls.train_loader = fluid.io.DataLoader(cls.train_dataset,
                                               places=cls.device,
                                               batch_size=64)
        cls.val_loader = fluid.io.DataLoader(cls.val_dataset,
                                             places=cls.device,
                                             batch_size=64)
        cls.test_loader = fluid.io.DataLoader(cls.test_dataset,
                                              places=cls.device,
                                              batch_size=64)

        seed = 333
        fluid.default_startup_program().random_seed = seed
        fluid.default_main_program().random_seed = seed

        dy_lenet = LeNetDygraph()
        cls.init_param = dy_lenet.state_dict()
        dynamic_train(dy_lenet, cls.train_loader)

        cls.acc1 = dynamic_evaluate(dy_lenet, cls.val_loader)

        cls.inputs = [Input([-1, 1, 28, 28], 'float32', name='image')]
        cls.labels = [Input([None, 1], 'int64', name='label')]

        cls.save_dir = tempfile.mkdtemp()
        cls.weight_path = os.path.join(cls.save_dir, 'lenet')
        fluid.dygraph.save_dygraph(dy_lenet.state_dict(), cls.weight_path)

        fluid.disable_dygraph()
Example #16
def main(args):
    place = set_device(args.device)
    fluid.enable_dygraph(place) if args.dynamic else None

    inputs = [
        Input([None, None], 'int64', name='words'),
        Input([None], 'int64', name='length'),
    ]

    dataset = LacDataset(args)
    predict_dataset = LacDataLoader(args, place, phase="predict")

    vocab_size = dataset.vocab_size
    num_labels = dataset.num_labels
    model = SeqTagging(args, vocab_size, num_labels, mode="predict")

    model.mode = "test"
    model.prepare(inputs=inputs)

    model.load(args.init_from_checkpoint, skip_mismatch=True)

    f = open(args.output_file, "wb")
    for data in predict_dataset.dataloader:
        if len(data) == 1:
            input_data = data[0]
        else:
            input_data = data
        results, length = model.test_batch(inputs=flatten(input_data))
        for i in range(len(results)):
            word_len = length[i]
            word_ids = results[i][:word_len]
            tags = [dataset.id2label_dict[str(id)] for id in word_ids]
            if six.PY3:
                tags = [bytes(tag, encoding="utf8") for tag in tags]
                out = b"\002".join(tags) + b"\n"
                f.write(out)
            else:
                f.write("\002".join(tags) + "\n")
Example #17
    def models_infer(self, arch, pretrained=False, batch_norm=False):

        x = np.array(np.random.random((2, 3, 224, 224)), dtype=np.float32)
        if batch_norm:
            model = models.__dict__[arch](pretrained=pretrained,
                                          batch_norm=True)
        else:
            model = models.__dict__[arch](pretrained=pretrained)
        inputs = [Input([None, 3, 224, 224], 'float32', name='image')]

        model.prepare(inputs=inputs)

        model.test_batch(x)
Example #18
    def test_static_multiple_gpus(self):
        device = set_device('gpu')

        fluid.enable_dygraph(device)
        im_shape = (-1, 1, 28, 28)
        batch_size = 128

        inputs = [Input(im_shape, 'float32', name='image')]
        labels = [Input([None, 1], 'int64', name='label')]

        train_dataset = MnistDataset(mode='train')
        val_dataset = MnistDataset(mode='test')
        test_dataset = MnistDataset(mode='test', return_label=False)

        model = LeNet()
        optim = fluid.optimizer.Momentum(learning_rate=0.001,
                                         momentum=.9,
                                         parameter_list=model.parameters())
        loss = CrossEntropy()
        model.prepare(optim, loss, Accuracy(), inputs, labels, device=device)
        cbk = ProgBarLogger(50)

        model.fit(train_dataset,
                  val_dataset,
                  epochs=2,
                  batch_size=batch_size,
                  callbacks=cbk)

        eval_result = model.evaluate(val_dataset, batch_size=batch_size)

        output = model.predict(test_dataset,
                               batch_size=batch_size,
                               stack_outputs=True)

        np.testing.assert_equal(output[0].shape[0], len(test_dataset))

        acc = compute_accuracy(output[0], val_dataset.labels)

        np.testing.assert_allclose(acc, eval_result['acc'])
Example #19
File: infer.py Project: wzzju/hapi
def main():
    device = set_device(FLAGS.device)
    fluid.enable_dygraph(device) if FLAGS.dynamic else None

    inputs = [
        Input([None, 1], 'int64', name='img_id'),
        Input([None, 2], 'int32', name='img_shape'),
        Input([None, 3, None, None], 'float32', name='image')
    ]

    cat2name = load_labels(FLAGS.label_list, with_background=False)

    model = yolov3_darknet53(num_classes=len(cat2name),
                             model_mode='test',
                             pretrained=FLAGS.weights is None)

    model.prepare(inputs=inputs, device=FLAGS.device)

    if FLAGS.weights is not None:
        model.load(FLAGS.weights, reset_optimizer=True)

    # image preprocess
    orig_img = Image.open(FLAGS.infer_image).convert('RGB')
    w, h = orig_img.size
    img = orig_img.resize((608, 608), Image.BICUBIC)
    img = np.array(img).astype('float32') / 255.0
    img -= np.array(IMAGE_MEAN)
    img /= np.array(IMAGE_STD)
    img = img.transpose((2, 0, 1))[np.newaxis, :]
    img_id = np.array([0]).astype('int64')[np.newaxis, :]
    img_shape = np.array([h, w]).astype('int32')[np.newaxis, :]

    _, bboxes = model.test_batch([img_id, img_shape, img])

    vis_img = draw_bbox(orig_img, cat2name, bboxes, FLAGS.draw_threshold)
    save_name = get_save_image_name(FLAGS.output_dir, FLAGS.infer_image)
    logger.info("Detection bbox results save in {}".format(save_name))
    vis_img.save(save_name, quality=95)
Example #20
File: infer.py Project: wzzju/hapi
def main():
    place = set_device(FLAGS.device)
    fluid.enable_dygraph(place) if FLAGS.dynamic else None

    # Generators
    g_AB = Generator()
    g_BA = Generator()
    g = GeneratorCombine(g_AB, g_BA, is_train=False)

    im_shape = [-1, 3, 256, 256]
    input_A = Input(im_shape, 'float32', 'input_A')
    input_B = Input(im_shape, 'float32', 'input_B')
    g.prepare(inputs=[input_A, input_B], device=FLAGS.device)
    g.load(FLAGS.init_model, skip_mismatch=True, reset_optimizer=True)

    out_path = FLAGS.output + "/single"
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    for f in glob.glob(FLAGS.input):
        image_name = os.path.basename(f)
        image = Image.open(f).convert('RGB')
        image = image.resize((256, 256), Image.BICUBIC)
        image = np.array(image) / 127.5 - 1

        image = image[:, :, 0:3].astype("float32")
        data = image.transpose([2, 0, 1])[np.newaxis, :]

        if FLAGS.input_style == "A":
            _, fake, _, _ = g.test_batch([data, data])

        if FLAGS.input_style == "B":
            fake, _, _, _ = g.test_batch([data, data])

        fake = np.squeeze(fake[0]).transpose([1, 2, 0])

        opath = "{}/fake{}{}".format(out_path, FLAGS.input_style, image_name)
        imsave(opath, ((fake + 1) * 127.5).astype(np.uint8))
        print("transfer {} to {}".format(f, opath))
Example #21
def infer():
    fluid.enable_dygraph(device)
    processor = SentaProcessor(data_dir=args.data_dir,
                               vocab_path=args.vocab_path,
                               random_seed=args.random_seed)

    infer_data_generator = processor.data_generator(
        batch_size=args.batch_size,
        padding_size=args.padding_size,
        places=device,
        phase='infer',
        epoch=1,
        shuffle=False)
    if args.model_type == 'cnn_net':
        model_infer = CNN(args.vocab_size, args.batch_size, args.padding_size)
    elif args.model_type == 'bow_net':
        model_infer = BOW(args.vocab_size, args.batch_size, args.padding_size)
    elif args.model_type == 'gru_net':
        model_infer = GRU(args.vocab_size, args.batch_size, args.padding_size)
    elif args.model_type == 'bigru_net':
        model_infer = BiGRU(args.vocab_size, args.batch_size,
                            args.padding_size)

    print('Running inference ...')
    inputs = [Input([None, None], 'int64', name='doc')]
    model_infer.prepare(None,
                        CrossEntropy(),
                        Accuracy(topk=(1, )),
                        inputs,
                        device=device)
    model_infer.load(args.checkpoints, reset_optimizer=True)
    preds = model_infer.predict(test_data=infer_data_generator)
    preds = np.array(preds[0]).reshape((-1, 2))

    if args.output_dir:
        with open(os.path.join(args.output_dir, 'predictions.json'), 'w') as w:

            for p in range(len(preds)):
                label = np.argmax(preds[p])
                result = json.dumps({
                    'index': p,
                    'label': label,
                    'probs': preds[p].tolist()
                })
                w.write(result + '\n')
        print('Predictions saved at ' +
              os.path.join(args.output_dir, 'predictions.json'))
Example #22
def main(FLAGS):
    device = set_device("gpu" if FLAGS.use_gpu else "cpu")
    fluid.enable_dygraph(device) if FLAGS.dynamic else None

    model = Seq2SeqAttModel(encoder_size=FLAGS.encoder_size,
                            decoder_size=FLAGS.decoder_size,
                            emb_dim=FLAGS.embedding_dim,
                            num_classes=FLAGS.num_classes)

    lr = FLAGS.lr
    if FLAGS.lr_decay_strategy == "piecewise_decay":
        learning_rate = fluid.layers.piecewise_decay([200000, 250000],
                                                     [lr, lr * 0.1, lr * 0.01])
    else:
        learning_rate = lr
    grad_clip = fluid.clip.GradientClipByGlobalNorm(FLAGS.gradient_clip)
    optimizer = fluid.optimizer.Adam(learning_rate=learning_rate,
                                     parameter_list=model.parameters(),
                                     grad_clip=grad_clip)

    # yapf: disable
    inputs = [
        Input([None,1,48,384], "float32", name="pixel"),
        Input([None, None], "int64", name="label_in"),
    ]
    labels = [
        Input([None, None], "int64", name="label_out"),
        Input([None, None], "float32", name="mask"),
    ]
    # yapf: enable

    model.prepare(optimizer,
                  WeightCrossEntropy(),
                  SeqAccuracy(),
                  inputs=inputs,
                  labels=labels)

    train_dataset = data.train()
    train_collate_fn = BatchCompose(
        [data.Resize(), data.Normalize(),
         data.PadTarget()])
    train_sampler = data.BatchSampler(train_dataset,
                                      batch_size=FLAGS.batch_size,
                                      shuffle=True)
    train_loader = fluid.io.DataLoader(train_dataset,
                                       batch_sampler=train_sampler,
                                       places=device,
                                       num_workers=FLAGS.num_workers,
                                       return_list=True,
                                       collate_fn=train_collate_fn)
    test_dataset = data.test()
    test_collate_fn = BatchCompose(
        [data.Resize(), data.Normalize(),
         data.PadTarget()])
    test_sampler = data.BatchSampler(test_dataset,
                                     batch_size=FLAGS.batch_size,
                                     drop_last=False,
                                     shuffle=False)
    test_loader = fluid.io.DataLoader(test_dataset,
                                      batch_sampler=test_sampler,
                                      places=device,
                                      num_workers=0,
                                      return_list=True,
                                      collate_fn=test_collate_fn)

    model.fit(train_data=train_loader,
              eval_data=test_loader,
              epochs=FLAGS.epoch,
              save_dir=FLAGS.checkpoint_path,
              callbacks=[LoggerCallBack(10, 2, FLAGS.batch_size)])
Example #23
File: eval.py Project: juncaipeng/hapi
def test_bmn(args):
    device = set_device(args.device)
    fluid.enable_dygraph(device) if args.dynamic else None

    #config setting
    config = parse_config(args.config_file)
    eval_cfg = merge_configs(config, 'test', vars(args))

    feat_dim = config.MODEL.feat_dim
    tscale = config.MODEL.tscale
    dscale = config.MODEL.dscale
    prop_boundary_ratio = config.MODEL.prop_boundary_ratio
    num_sample = config.MODEL.num_sample
    num_sample_perbin = config.MODEL.num_sample_perbin

    #input and video index
    inputs = [
        Input(
            [None, config.MODEL.feat_dim, config.MODEL.tscale],
            'float32',
            name='feat_input')
    ]
    gt_iou_map = Input(
        [None, config.MODEL.dscale, config.MODEL.tscale],
        'float32',
        name='gt_iou_map')
    gt_start = Input([None, config.MODEL.tscale], 'float32', name='gt_start')
    gt_end = Input([None, config.MODEL.tscale], 'float32', name='gt_end')
    video_idx = Input([None, 1], 'int64', name='video_idx')
    labels = [gt_iou_map, gt_start, gt_end, video_idx]

    #data
    eval_dataset = BmnDataset(eval_cfg, 'test')

    #model
    model = bmn(tscale,
                dscale,
                prop_boundary_ratio,
                num_sample,
                num_sample_perbin,
                pretrained=args.weights is None)
    model.prepare(
        loss_function=BmnLoss(tscale, dscale),
        metrics=BmnMetric(
            config, mode='test'),
        inputs=inputs,
        labels=labels,
        device=device)

    #load checkpoint
    if args.weights is not None:
        assert os.path.exists(args.weights + '.pdparams'), \
            "Given weight dir {} not exist.".format(args.weights)
        logger.info('load test weights from {}'.format(args.weights))
        model.load(args.weights)

    model.evaluate(
        eval_data=eval_dataset,
        batch_size=eval_cfg.TEST.batch_size,
        num_workers=eval_cfg.TEST.num_workers,
        log_freq=args.log_interval)

    logger.info("[EVAL] eval finished")
Example #24
File: main.py Project: wzzju/hapi
def main():
    device = set_device(FLAGS.device)
    fluid.enable_dygraph(device) if FLAGS.dynamic else None

    train_transform = Compose([
        GroupScale(),
        GroupMultiScaleCrop(),
        GroupRandomCrop(),
        GroupRandomFlip(),
        NormalizeImage()
    ])
    train_dataset = KineticsDataset(
        file_list=os.path.join(FLAGS.data, 'train_10.list'),
        pickle_dir=os.path.join(FLAGS.data, 'train_10'),
        label_list=os.path.join(FLAGS.data, 'label_list'),
        transform=train_transform)
    val_transform = Compose(
        [GroupScale(), GroupCenterCrop(),
         NormalizeImage()])
    val_dataset = KineticsDataset(
        file_list=os.path.join(FLAGS.data, 'val_10.list'),
        pickle_dir=os.path.join(FLAGS.data, 'val_10'),
        label_list=os.path.join(FLAGS.data, 'label_list'),
        mode='val',
        transform=val_transform)

    pretrained = FLAGS.eval_only and FLAGS.weights is None
    model = tsm_resnet50(num_classes=train_dataset.num_classes,
                         pretrained=pretrained)

    step_per_epoch = int(len(train_dataset) / FLAGS.batch_size \
                         / ParallelEnv().nranks)
    optim = make_optimizer(step_per_epoch, model.parameters())

    inputs = [Input([None, 8, 3, 224, 224], 'float32', name='image')]
    labels = [Input([None, 1], 'int64', name='label')]

    model.prepare(optim,
                  CrossEntropy(),
                  metrics=Accuracy(topk=(1, 5)),
                  inputs=inputs,
                  labels=labels,
                  device=FLAGS.device)

    if FLAGS.eval_only:
        if FLAGS.weights is not None:
            model.load(FLAGS.weights, reset_optimizer=True)

        model.evaluate(val_dataset,
                       batch_size=FLAGS.batch_size,
                       num_workers=FLAGS.num_workers)
        return

    if FLAGS.resume is not None:
        model.load(FLAGS.resume)

    model.fit(train_data=train_dataset,
              eval_data=val_dataset,
              epochs=FLAGS.epoch,
              batch_size=FLAGS.batch_size,
              save_dir=FLAGS.save_dir or 'tsm_checkpoint',
              num_workers=FLAGS.num_workers,
              drop_last=True,
              shuffle=True)
Example #25
File: predict.py Project: wzzju/hapi
def do_predict(args):
    device = set_device("gpu" if args.use_gpu else "cpu")
    fluid.enable_dygraph(device) if args.eager_run else None

    # define model
    inputs = [
        Input([None, None], "int64", name="src_word"),
        Input([None], "int64", name="src_length"),
    ]

    # define dataloader
    dataset = Seq2SeqDataset(
        fpattern=args.infer_file,
        src_vocab_fpath=args.vocab_prefix + "." + args.src_lang,
        trg_vocab_fpath=args.vocab_prefix + "." + args.tar_lang,
        token_delimiter=None,
        start_mark="<s>",
        end_mark="</s>",
        unk_mark="<unk>")
    trg_idx2word = Seq2SeqDataset.load_dict(dict_path=args.vocab_prefix + "." +
                                            args.tar_lang,
                                            reverse=True)
    (args.src_vocab_size, args.trg_vocab_size, bos_id, eos_id,
     unk_id) = dataset.get_vocab_summary()
    batch_sampler = Seq2SeqBatchSampler(dataset=dataset,
                                        use_token_batch=False,
                                        batch_size=args.batch_size)
    data_loader = DataLoader(dataset=dataset,
                             batch_sampler=batch_sampler,
                             places=device,
                             collate_fn=partial(prepare_infer_input,
                                                bos_id=bos_id,
                                                eos_id=eos_id,
                                                pad_id=eos_id),
                             num_workers=0,
                             return_list=True)

    model_maker = AttentionInferModel if args.attention else BaseInferModel
    model = model_maker(args.src_vocab_size,
                        args.tar_vocab_size,
                        args.hidden_size,
                        args.hidden_size,
                        args.num_layers,
                        args.dropout,
                        bos_id=bos_id,
                        eos_id=eos_id,
                        beam_size=args.beam_size,
                        max_out_len=256)

    model.prepare(inputs=inputs, device=device)

    # load the trained model
    assert args.reload_model, (
        "Please set reload_model to load the infer model.")
    model.load(args.reload_model)

    # TODO(guosheng): use model.predict once variable-length outputs are supported
    with io.open(args.infer_output_file, 'w', encoding='utf-8') as f:
        for data in data_loader():
            finished_seq = model.test_batch(inputs=flatten(data))[0]
            finished_seq = finished_seq[:, :, np.newaxis] if len(
                finished_seq.shape) == 2 else finished_seq
            finished_seq = np.transpose(finished_seq, [0, 2, 1])
            for ins in finished_seq:
                for beam_idx, beam in enumerate(ins):
                    id_list = post_process_seq(beam, bos_id, eos_id)
                    word_list = [trg_idx2word[id] for id in id_list]
                    sequence = " ".join(word_list) + "\n"
                    f.write(sequence)
                    break
Example #26
File: main.py Project: juncaipeng/hapi
def main():
    device = set_device(FLAGS.device)
    fluid.enable_dygraph(device) if FLAGS.dynamic else None

    inputs = [
        Input([None, 1], 'int64', name='img_id'),
        Input([None, 2], 'int32', name='img_shape'),
        Input([None, 3, None, None], 'float32', name='image')
    ]

    labels = [
        Input([None, NUM_MAX_BOXES, 4], 'float32', name='gt_bbox'),
        Input([None, NUM_MAX_BOXES], 'int32', name='gt_label'),
        Input([None, NUM_MAX_BOXES], 'float32', name='gt_score')
    ]

    if not FLAGS.eval_only:  # training mode
        train_transform = Compose([
            ColorDistort(), RandomExpand(), RandomCrop(), RandomFlip(),
            NormalizeBox(), PadBox(), BboxXYXY2XYWH()
        ])

        train_collate_fn = BatchCompose([RandomShape(), NormalizeImage()])
        dataset = COCODataset(
            dataset_dir=FLAGS.data,
            anno_path='annotations/instances_train2017.json',
            image_dir='train2017',
            with_background=False,
            mixup=True,
            transform=train_transform)
        batch_sampler = DistributedBatchSampler(
            dataset, batch_size=FLAGS.batch_size, shuffle=True, drop_last=True)
        loader = DataLoader(
            dataset,
            batch_sampler=batch_sampler,
            places=device,
            num_workers=FLAGS.num_workers,
            return_list=True,
            collate_fn=train_collate_fn)
    else:  # evaluation mode
        eval_transform = Compose([
            ResizeImage(target_size=608), NormalizeBox(), PadBox(),
            BboxXYXY2XYWH()
        ])

        eval_collate_fn = BatchCompose([NormalizeImage()])
        dataset = COCODataset(
            dataset_dir=FLAGS.data,
            anno_path='annotations/instances_val2017.json',
            image_dir='val2017',
            with_background=False,
            transform=eval_transform)
        # batch_size can only be 1 in evaluation for YOLOv3, since the
        # predicted bbox is a LoDTensor
        batch_sampler = DistributedBatchSampler(
            dataset, batch_size=1, shuffle=False, drop_last=False)
        loader = DataLoader(
            dataset,
            batch_sampler=batch_sampler,
            places=device,
            num_workers=FLAGS.num_workers,
            return_list=True,
            collate_fn=eval_collate_fn)

    pretrained = FLAGS.eval_only and FLAGS.weights is None
    model = yolov3_darknet53(
        num_classes=dataset.num_classes,
        model_mode='eval' if FLAGS.eval_only else 'train',
        pretrained=pretrained)

    if FLAGS.pretrain_weights and not FLAGS.eval_only:
        model.load(
            FLAGS.pretrain_weights, skip_mismatch=True, reset_optimizer=True)

    optim = make_optimizer(
        len(batch_sampler), parameter_list=model.parameters())

    model.prepare(
        optim,
        YoloLoss(num_classes=dataset.num_classes),
        inputs=inputs,
        labels=labels,
        device=FLAGS.device)

    # NOTE: we implement the COCO metric of the YOLOv3 model here, separately
    # from the 'prepare' and 'fit' framework, for the following reasons:
    # 1. The YOLOv3 network structure differs between 'train' and 'eval'
    #    mode; in 'eval' mode the outputs are predicted bboxes rather than
    #    the feature maps used for computing YoloLoss.
    # 2. The COCO metric also behaves differently from a regular Metric:
    #    it should not accumulate on each iteration, only at the end of
    #    an epoch.
    if FLAGS.eval_only:
        if FLAGS.weights is not None:
            model.load(FLAGS.weights, reset_optimizer=True)
        preds = model.predict(loader, stack_outputs=False)
        _, _, _, img_ids, bboxes = preds

        anno_path = os.path.join(FLAGS.data,
                                 'annotations/instances_val2017.json')
        coco_metric = COCOMetric(anno_path=anno_path, with_background=False)
        for img_id, bbox in zip(img_ids, bboxes):
            coco_metric.update(img_id, bbox)
        coco_metric.accumulate()
        coco_metric.reset()
        return

    if FLAGS.resume is not None:
        model.load(FLAGS.resume)

    save_dir = FLAGS.save_dir or 'yolo_checkpoint'

    model.fit(train_data=loader,
              epochs=FLAGS.epoch - FLAGS.no_mixup_epoch,
              save_dir=os.path.join(save_dir, "mixup"),
              save_freq=10)

    # do not use the image mixup transform in the last FLAGS.no_mixup_epoch epochs
    dataset.mixup = False
    model.fit(train_data=loader,
              epochs=FLAGS.no_mixup_epoch,
              save_dir=os.path.join(save_dir, "no_mixup"),
              save_freq=5)
Example #27
def main():

    config = Config(yaml_file="./bert.yaml")
    config.build()
    config.Print()

    device = set_device("gpu" if config.use_cuda else "cpu")
    fluid.enable_dygraph(device)

    bert_config = BertConfig(config.bert_config_path)
    bert_config.print_config()

    tokenizer = tokenization.FullTokenizer(vocab_file=config.vocab_path,
                                           do_lower_case=config.do_lower_case)

    def mnli_line_processor(line_id, line):
        if line_id == "0":
            return None
        uid = tokenization.convert_to_unicode(line[0])
        text_a = tokenization.convert_to_unicode(line[8])
        text_b = tokenization.convert_to_unicode(line[9])
        label = tokenization.convert_to_unicode(line[-1])
        if label not in ["contradiction", "entailment", "neutral"]:
            label = "contradiction"
        return BertInputExample(uid=uid,
                                text_a=text_a,
                                text_b=text_b,
                                label=label)

    train_dataloader = BertDataLoader(
        "./data/glue_data/MNLI/train.tsv",
        tokenizer,
        ["contradiction", "entailment", "neutral"],
        max_seq_length=config.max_seq_len,
        batch_size=config.batch_size,
        line_processor=mnli_line_processor,
        mode="leveldb",
    )

    test_dataloader = BertDataLoader(
        "./data/glue_data/MNLI/dev_matched.tsv",
        tokenizer, ["contradiction", "entailment", "neutral"],
        max_seq_length=config.max_seq_len,
        batch_size=config.batch_size,
        line_processor=mnli_line_processor,
        shuffle=False,
        phase="predict")

    trainer_count = fluid.dygraph.parallel.Env().nranks
    num_train_examples = len(train_dataloader.dataset)
    max_train_steps = config.epoch * num_train_examples // config.batch_size // trainer_count
    warmup_steps = int(max_train_steps * config.warmup_proportion)

    print("Trainer count: %d" % trainer_count)
    print("Num train examples: %d" % num_train_examples)
    print("Max train steps: %d" % max_train_steps)
    print("Num warmup steps: %d" % warmup_steps)

    inputs = [
        Input([None, None], 'int64', name='src_ids'),
        Input([None, None], 'int64', name='pos_ids'),
        Input([None, None], 'int64', name='sent_ids'),
        Input([None, None, 1], 'float32', name='input_mask')
    ]

    labels = [Input([None, 1], 'int64', name='label')]

    cls_model = ClsModelLayer(config,
                              bert_config,
                              len(["contradiction", "entailment", "neutral"]),
                              return_pooled_out=True)

    optimizer = make_optimizer(warmup_steps=warmup_steps,
                               num_train_steps=max_train_steps,
                               learning_rate=config.learning_rate,
                               weight_decay=config.weight_decay,
                               scheduler=config.lr_scheduler,
                               model=cls_model,
                               loss_scaling=config.loss_scaling,
                               parameter_list=cls_model.parameters())

    cls_model.prepare(optimizer,
                      SoftmaxWithCrossEntropy(),
                      Accuracy(topk=(1, 2)),
                      inputs,
                      labels,
                      device=device)

    cls_model.bert_layer.load("./bert_uncased_L-12_H-768_A-12/bert",
                              reset_optimizer=True)

    # do train
    cls_model.fit(train_data=train_dataloader.dataloader,
                  epochs=config.epoch,
                  save_dir=config.checkpoints)

    # do eval
    cls_model.evaluate(eval_data=test_dataloader.dataloader,
                       batch_size=config.batch_size)
Example #28
File: train.py Project: wzzju/hapi
def do_train(args):
    device = set_device("gpu" if args.use_cuda else "cpu")
    fluid.enable_dygraph(device) if args.eager_run else None

    # set seed for CE
    random_seed = eval(str(args.random_seed))
    if random_seed is not None:
        fluid.default_main_program().random_seed = random_seed
        fluid.default_startup_program().random_seed = random_seed

    # define inputs
    inputs = [
        Input([None, None], "int64", name="src_word"),
        Input([None, None], "int64", name="src_pos"),
        Input([None, args.n_head, None, None],
              "float32",
              name="src_slf_attn_bias"),
        Input([None, None], "int64", name="trg_word"),
        Input([None, None], "int64", name="trg_pos"),
        Input([None, args.n_head, None, None],
              "float32",
              name="trg_slf_attn_bias"),
        Input([None, args.n_head, None, None],
              "float32",
              name="trg_src_attn_bias"),
    ]
    labels = [
        Input([None, 1], "int64", name="label"),
        Input([None, 1], "float32", name="weight"),
    ]

    # define dataloader
    (train_loader,
     train_steps_fn), (eval_loader,
                       eval_steps_fn) = create_data_loader(args, device)

    # define model
    transformer = Transformer(args.src_vocab_size, args.trg_vocab_size,
                              args.max_length + 1, args.n_layer, args.n_head,
                              args.d_key, args.d_value, args.d_model,
                              args.d_inner_hid, args.prepostprocess_dropout,
                              args.attention_dropout, args.relu_dropout,
                              args.preprocess_cmd, args.postprocess_cmd,
                              args.weight_sharing, args.bos_idx, args.eos_idx)

    transformer.prepare(fluid.optimizer.Adam(
        learning_rate=fluid.layers.noam_decay(
            args.d_model, args.warmup_steps, learning_rate=args.learning_rate),
        beta1=args.beta1,
        beta2=args.beta2,
        epsilon=float(args.eps),
        parameter_list=transformer.parameters()),
                        CrossEntropyCriterion(args.label_smooth_eps),
                        inputs=inputs,
                        labels=labels,
                        device=device)

    ## init from some checkpoint, to resume the previous training
    if args.init_from_checkpoint:
        transformer.load(args.init_from_checkpoint)
    ## init from a pretrained model, to better solve the current task
    if args.init_from_pretrain_model:
        transformer.load(args.init_from_pretrain_model, reset_optimizer=True)

    # model train
    transformer.fit(train_data=train_loader,
                    eval_data=eval_loader,
                    epochs=args.epoch,
                    eval_freq=1,
                    save_freq=1,
                    save_dir=args.save_model,
                    callbacks=[
                        TrainCallback(args,
                                      train_steps_fn=train_steps_fn,
                                      eval_steps_fn=eval_steps_fn)
                    ])
Example #29
File: train.py Project: wzzju/hapi
def main():
    place = set_device(FLAGS.device)
    fluid.enable_dygraph(place) if FLAGS.dynamic else None

    # Generators
    g_AB = Generator()
    g_BA = Generator()

    # Discriminators
    d_A = Discriminator()
    d_B = Discriminator()

    g = GeneratorCombine(g_AB, g_BA, d_A, d_B)

    da_params = d_A.parameters()
    db_params = d_B.parameters()
    g_params = g_AB.parameters() + g_BA.parameters()

    da_optimizer = opt(da_params)
    db_optimizer = opt(db_params)
    g_optimizer = opt(g_params)

    im_shape = [None, 3, 256, 256]
    input_A = Input(im_shape, 'float32', 'input_A')
    input_B = Input(im_shape, 'float32', 'input_B')
    fake_A = Input(im_shape, 'float32', 'fake_A')
    fake_B = Input(im_shape, 'float32', 'fake_B')

    g_AB.prepare(inputs=[input_A], device=FLAGS.device)
    g_BA.prepare(inputs=[input_B], device=FLAGS.device)

    g.prepare(g_optimizer,
              GLoss(),
              inputs=[input_A, input_B],
              device=FLAGS.device)
    d_A.prepare(da_optimizer,
                DLoss(),
                inputs=[input_B, fake_B],
                device=FLAGS.device)
    d_B.prepare(db_optimizer,
                DLoss(),
                inputs=[input_A, fake_A],
                device=FLAGS.device)

    if FLAGS.resume:
        g.load(FLAGS.resume)

    loader_A = paddle.io.DataLoader(data.DataA(),
                                    places=place,
                                    shuffle=True,
                                    return_list=True,
                                    batch_size=FLAGS.batch_size)
    loader_B = paddle.io.DataLoader(data.DataB(),
                                    places=place,
                                    shuffle=True,
                                    return_list=True,
                                    batch_size=FLAGS.batch_size)

    A_pool = data.ImagePool()
    B_pool = data.ImagePool()

    for epoch in range(FLAGS.epoch):
        for i, (data_A, data_B) in enumerate(zip(loader_A, loader_B)):
            data_A = data_A[0][0] if not FLAGS.dynamic else data_A[0]
            data_B = data_B[0][0] if not FLAGS.dynamic else data_B[0]
            start = time.time()

            fake_B = g_AB.test_batch(data_A)[0]
            fake_A = g_BA.test_batch(data_B)[0]
            g_loss = g.train_batch([data_A, data_B])[0]
            fake_pb = B_pool.get(fake_B)
            da_loss = d_A.train_batch([data_B, fake_pb])[0]

            fake_pa = A_pool.get(fake_A)
            db_loss = d_B.train_batch([data_A, fake_pa])[0]

            t = time.time() - start
            if i % 20 == 0:
                print("epoch: {} | step: {:3d} | g_loss: {:.4f} | " \
                      "da_loss: {:.4f} | db_loss: {:.4f} | s/step {:.4f}".
                      format(epoch, i, g_loss[0], da_loss[0], db_loss[0], t))
        g.save('{}/{}'.format(FLAGS.checkpoint_path, epoch))
Example #30

class Mnist(Model):
    def __init__(self, name_scope):
        super(Mnist, self).__init__()
        self.fc = Linear(input_dim=784, output_dim=10, act="softmax")

    # Define the forward computation of the network
    def forward(self, inputs):
        inputs = paddle.tensor.reshape(inputs, shape=[-1, 784])
        outputs = self.fc(inputs)
        return outputs


# Define the input data format
inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
labels = [Input([None, 1], 'int64', name='label')]

# Declare the network structure
model = Mnist("mnist")
optimizer = paddle.optimizer.SGDOptimizer(learning_rate=0.001,
                                          parameter_list=model.parameters())
# Use the high-level API: prepare() sets up the training configuration
model.prepare(optimizer,
              CrossEntropy(),
              Accuracy(),
              inputs,
              labels,
              device='cpu')

# Define the data reader
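
Example #30 is cut off after this comment. As a rough sketch of how the snippet might continue, reusing the MnistDataset and fit() pattern shown in Example #18 (the dataset class and the hyperparameters below are assumptions, not part of the original listing):

# Hypothetical continuation, modeled on Example #18; not part of the original snippet.
train_dataset = MnistDataset(mode='train')   # assumed Dataset yielding (image, label) pairs
val_dataset = MnistDataset(mode='test')

# Train and evaluate with the high-level fit() API configured by prepare() above.
model.fit(train_dataset,
          val_dataset,
          epochs=2,
          batch_size=64)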