Example 1
0
    def test_static_save_dynamic_load(self):
        """Parameters saved in static-graph mode must load correctly in dygraph."""
        path = tempfile.mkdtemp()

        def make_prepared_model():
            # Identical model/spec/optimizer setup is needed for both phases.
            net = MyModel()
            in_specs = [Input([None, 20], 'float32', name='x')]
            label_specs = [Input([None, 1], 'int64', name='label')]
            sgd = fluid.optimizer.SGD(learning_rate=0.001,
                                      parameter_list=net.parameters())
            net.prepare(inputs=in_specs,
                        optimizer=sgd,
                        loss_function=CrossEntropy(average=False),
                        labels=label_specs)
            return net

        # Phase 1: static graph — build, prepare, save a checkpoint.
        model = make_prepared_model()
        model.save(path + '/test')

        # Phase 2: dygraph — rebuild the same model and load the checkpoint.
        device = set_device('cpu')
        fluid.enable_dygraph(device)
        model = make_prepared_model()
        model.load(path + '/test')

        shutil.rmtree(path)
        fluid.disable_dygraph()
Example 2
0
def main():
    """Train or evaluate LeNet on MNIST, driven by command-line FLAGS."""
    device = set_device(FLAGS.device)
    # Plain `if` for the side effect; the original abused a conditional
    # expression (`... if FLAGS.dynamic else None`) as a statement.
    if FLAGS.dynamic:
        fluid.enable_dygraph(device)

    train_dataset = MnistDataset(mode='train')
    val_dataset = MnistDataset(mode='test')

    # Input/label specs: NCHW MNIST images and int64 class labels.
    inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
    labels = [Input([None, 1], 'int64', name='label')]

    model = LeNet()
    optim = Momentum(
        learning_rate=FLAGS.lr, momentum=.9, parameter_list=model.parameters())

    model.prepare(
        optim,
        CrossEntropy(),
        Accuracy(topk=(1, 2)),
        inputs,
        labels,
        device=FLAGS.device)

    # Optionally resume from a checkpoint before training/evaluating.
    if FLAGS.resume is not None:
        model.load(FLAGS.resume)

    if FLAGS.eval_only:
        model.evaluate(val_dataset, batch_size=FLAGS.batch_size)
        return

    model.fit(train_dataset,
              val_dataset,
              epochs=FLAGS.epoch,
              batch_size=FLAGS.batch_size,
              save_dir=FLAGS.output_dir)
Example 3
0
File: main.py — Project: wzzju/hapi
def main():
    """Train or evaluate an ImageNet classifier selected by FLAGS.arch."""
    device = set_device(FLAGS.device)
    # Plain `if` for the side effect; the original abused a conditional
    # expression (`... if FLAGS.dynamic else None`) as a statement.
    if FLAGS.dynamic:
        fluid.enable_dygraph(device)

    # `list(...)` instead of the original no-op identity comprehension.
    model_list = list(models.__dict__["__all__"])
    assert FLAGS.arch in model_list, "Expected FLAGS.arch in {}, but received {}".format(
        model_list, FLAGS.arch)
    # Download pretrained weights only when evaluating without a checkpoint.
    model = models.__dict__[FLAGS.arch](
        pretrained=FLAGS.eval_only and not FLAGS.resume)

    if FLAGS.resume is not None:
        model.load(FLAGS.resume)

    # Input/label specs: NCHW ImageNet crops and int64 class labels.
    inputs = [Input([None, 3, 224, 224], 'float32', name='image')]
    labels = [Input([None, 1], 'int64', name='label')]

    train_dataset = ImageNetDataset(os.path.join(FLAGS.data, 'train'),
                                    mode='train',
                                    image_size=FLAGS.image_size,
                                    resize_short_size=FLAGS.resize_short_size)

    val_dataset = ImageNetDataset(os.path.join(FLAGS.data, 'val'),
                                  mode='val',
                                  image_size=FLAGS.image_size,
                                  resize_short_size=FLAGS.resize_short_size)

    # Steps per epoch, accounting for multi-GPU data parallelism.
    optim = make_optimizer(np.ceil(
        len(train_dataset) * 1. / FLAGS.batch_size / ParallelEnv().nranks),
                           parameter_list=model.parameters())

    model.prepare(optim, CrossEntropy(), Accuracy(topk=(1, 5)), inputs, labels,
                  FLAGS.device)

    if FLAGS.eval_only:
        model.evaluate(val_dataset,
                       batch_size=FLAGS.batch_size,
                       num_workers=FLAGS.num_workers)
        return

    # Timestamped output directory, created only by the rank-0 process.
    output_dir = os.path.join(
        FLAGS.output_dir, FLAGS.arch,
        time.strftime('%Y-%m-%d-%H-%M', time.localtime()))
    if ParallelEnv().local_rank == 0 and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    model.fit(train_dataset,
              val_dataset,
              batch_size=FLAGS.batch_size,
              epochs=FLAGS.epoch,
              save_dir=output_dir,
              num_workers=FLAGS.num_workers)
Example 4
0
    def test_train_batch(self, dynamic=True):
        """Verify Model.train_batch against a hand-written dygraph step.

        Runs the same single training step through the high-level API in
        both dynamic and static mode and checks the loss matches a manual
        dygraph reference.

        NOTE(review): the ``dynamic`` parameter is immediately shadowed by
        the ``for dynamic in [True, False]`` loop below and is effectively
        unused; it is kept only for signature compatibility.
        """
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)
        label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)

        def get_expect():
            # Manual dygraph SGD step used as the reference loss value.
            fluid.enable_dygraph(fluid.CPUPlace())
            self.set_seed()
            m = MyModel()
            optim = fluid.optimizer.SGD(learning_rate=0.001,
                                        parameter_list=m.parameters())
            m.train()
            output = m(to_variable(data))
            loss = fluid.layers.cross_entropy(output, to_variable(label))
            avg_loss = fluid.layers.reduce_sum(loss)
            avg_loss.backward()
            optim.minimize(avg_loss)
            m.clear_gradients()
            fluid.disable_dygraph()
            return avg_loss.numpy()

        ref = get_expect()
        for dynamic in [True, False]:
            device = set_device('cpu')
            # Plain `if` for the side effect; the original abused a
            # conditional expression (`... if dynamic else None`) here.
            if dynamic:
                fluid.enable_dygraph(device)
            self.set_seed()
            model = MyModel()

            optim2 = fluid.optimizer.SGD(learning_rate=0.001,
                                         parameter_list=model.parameters())

            inputs = [Input([None, dim], 'float32', name='x')]
            labels = [Input([None, 1], 'int64', name='label')]
            model.prepare(optim2,
                          loss_function=CrossEntropy(average=False),
                          inputs=inputs,
                          labels=labels,
                          device=device)
            loss, = model.train_batch([data], [label])

            np.testing.assert_allclose(loss.flatten(), ref.flatten())
            if dynamic:
                fluid.disable_dygraph()
Example 5
0
    def test_cross_entropy(self):
        """CrossEntropy output matches the numpy reference within 1e-5."""
        num_classes = 100
        batch = 128
        probs = [randomize_probability(128, num_classes) for _ in range(2)]

        targets = [
            np.random.randint(0, num_classes, (batch, 1), dtype="int64")
            for _ in range(2)
        ]

        # Reference values computed with the numpy implementation.
        expected = [numpy_ce(p, t) for p, t in zip(probs, targets)]

        fluid.enable_dygraph()
        loss_fn = CrossEntropy()
        results = loss_fn(
            [fluid.dygraph.to_variable(p) for p in probs],
            [fluid.dygraph.to_variable(t) for t in targets])

        for got, want in zip(results, expected):
            np.testing.assert_allclose(got.numpy(), want, atol=1e-5)
Example 6
0
    def test_static_multiple_gpus(self):
        """Fit LeNet on MNIST; accuracy recomputed from predict() output must
        agree with the accuracy reported by evaluate()."""
        device = set_device('gpu')
        fluid.enable_dygraph(device)

        batch_size = 128
        image_shape = (-1, 1, 28, 28)
        inputs = [Input(image_shape, 'float32', name='image')]
        labels = [Input([None, 1], 'int64', name='label')]

        train_dataset = MnistDataset(mode='train')
        val_dataset = MnistDataset(mode='test')
        test_dataset = MnistDataset(mode='test', return_label=False)

        model = LeNet()
        momentum = fluid.optimizer.Momentum(learning_rate=0.001,
                                            momentum=.9,
                                            parameter_list=model.parameters())
        model.prepare(momentum, CrossEntropy(), Accuracy(), inputs, labels,
                      device=device)

        progress_cb = ProgBarLogger(50)
        model.fit(train_dataset,
                  val_dataset,
                  epochs=2,
                  batch_size=batch_size,
                  callbacks=progress_cb)

        eval_result = model.evaluate(val_dataset, batch_size=batch_size)

        output = model.predict(test_dataset,
                               batch_size=batch_size,
                               stack_outputs=True)
        np.testing.assert_equal(output[0].shape[0], len(test_dataset))

        # val and test datasets share labels (both mode='test').
        acc = compute_accuracy(output[0], val_dataset.labels)
        np.testing.assert_allclose(acc, eval_result['acc'])
Example 7
0
    def fit(self, dynamic):
        """Train LeNet twice (dataset input, then DataLoader input) and check
        evaluation accuracy against ``self.acc1``.

        Args:
            dynamic (bool): run in dygraph mode when True, static otherwise.
        """
        # Plain `if` for the side effect; the original abused a conditional
        # expression (`... if dynamic else None`) as a statement.
        if dynamic:
            fluid.enable_dygraph(self.device)
        # Fix the random seeds so both modes produce comparable results.
        seed = 333
        fluid.default_startup_program().random_seed = seed
        fluid.default_main_program().random_seed = seed

        model = LeNet()
        optim_new = fluid.optimizer.Adam(learning_rate=0.001,
                                         parameter_list=model.parameters())
        model.prepare(optim_new,
                      loss_function=CrossEntropy(average=False),
                      metrics=Accuracy(),
                      inputs=self.inputs,
                      labels=self.labels)
        # First pass: feed the datasets directly.
        model.fit(self.train_dataset, batch_size=64, shuffle=False)

        result = model.evaluate(self.val_dataset, batch_size=64)
        np.testing.assert_allclose(result['acc'], self.acc1)

        # Second pass: feed pre-built DataLoaders with distributed samplers.
        train_sampler = DistributedBatchSampler(self.train_dataset,
                                                batch_size=64,
                                                shuffle=False)
        val_sampler = DistributedBatchSampler(self.val_dataset,
                                              batch_size=64,
                                              shuffle=False)

        train_loader = fluid.io.DataLoader(self.train_dataset,
                                           batch_sampler=train_sampler,
                                           places=self.device,
                                           return_list=True)

        val_loader = fluid.io.DataLoader(self.val_dataset,
                                         batch_sampler=val_sampler,
                                         places=self.device,
                                         return_list=True)

        model.fit(train_loader, val_loader)
        if dynamic:
            fluid.disable_dygraph()
Example 8
0
File: main.py — Project: wzzju/hapi
def main():
    """Train or evaluate TSM-ResNet50 on Kinetics, driven by FLAGS."""
    device = set_device(FLAGS.device)
    # Plain `if` for the side effect; the original abused a conditional
    # expression (`... if FLAGS.dynamic else None`) as a statement.
    if FLAGS.dynamic:
        fluid.enable_dygraph(device)

    # Training augmentation pipeline.
    train_transform = Compose([
        GroupScale(),
        GroupMultiScaleCrop(),
        GroupRandomCrop(),
        GroupRandomFlip(),
        NormalizeImage()
    ])
    train_dataset = KineticsDataset(
        file_list=os.path.join(FLAGS.data, 'train_10.list'),
        pickle_dir=os.path.join(FLAGS.data, 'train_10'),
        label_list=os.path.join(FLAGS.data, 'label_list'),
        transform=train_transform)
    # Validation uses deterministic center-crop preprocessing.
    val_transform = Compose(
        [GroupScale(), GroupCenterCrop(),
         NormalizeImage()])
    val_dataset = KineticsDataset(
        file_list=os.path.join(FLAGS.data, 'val_10.list'),
        pickle_dir=os.path.join(FLAGS.data, 'val_10'),
        label_list=os.path.join(FLAGS.data, 'label_list'),
        mode='val',
        transform=val_transform)

    # Download pretrained weights only when evaluating without explicit ones.
    pretrained = FLAGS.eval_only and FLAGS.weights is None
    model = tsm_resnet50(num_classes=train_dataset.num_classes,
                         pretrained=pretrained)

    # Steps per epoch, accounting for multi-GPU data parallelism.
    step_per_epoch = int(len(train_dataset) / FLAGS.batch_size \
                         / ParallelEnv().nranks)
    optim = make_optimizer(step_per_epoch, model.parameters())

    # Input/label specs: 8-segment NCHW clips and int64 class labels.
    inputs = [Input([None, 8, 3, 224, 224], 'float32', name='image')]
    labels = [Input([None, 1], 'int64', name='label')]

    model.prepare(optim,
                  CrossEntropy(),
                  metrics=Accuracy(topk=(1, 5)),
                  inputs=inputs,
                  labels=labels,
                  device=FLAGS.device)

    if FLAGS.eval_only:
        if FLAGS.weights is not None:
            model.load(FLAGS.weights, reset_optimizer=True)

        model.evaluate(val_dataset,
                       batch_size=FLAGS.batch_size,
                       num_workers=FLAGS.num_workers)
        return

    if FLAGS.resume is not None:
        model.load(FLAGS.resume)

    model.fit(train_data=train_dataset,
              eval_data=val_dataset,
              epochs=FLAGS.epoch,
              batch_size=FLAGS.batch_size,
              save_dir=FLAGS.save_dir or 'tsm_checkpoint',
              num_workers=FLAGS.num_workers,
              drop_last=True,
              shuffle=True)
Example 9
0
        inputs = paddle.tensor.reshape(inputs, shape=[-1, 784])
        outputs = self.fc(inputs)
        return outputs


# Input/label specs: NCHW MNIST images and int64 class labels.
inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
labels = [Input([None, 1], 'int64', name='label')]

# Declare the network structure.
model = Mnist("mnist")
optimizer = paddle.optimizer.SGDOptimizer(learning_rate=0.001,
                                          parameter_list=model.parameters())
# Use the high-level API: prepare() configures training.
model.prepare(optimizer,
              CrossEntropy(),
              Accuracy(),
              inputs,
              labels,
              device='cpu')

# Define the dataset readers.
train_dataset = MnistDataset(mode='example_train')
val_dataset = MnistDataset(mode='test')
# Start training.
model.fit(train_dataset,
          val_dataset,
          batch_size=100,
          epochs=10,
          log_freq=100,
          save_dir="output/")