Ejemplo n.º 1
0
def main():
    """Train an MNIST classifier, optionally resuming from a checkpoint.

    Reads all configuration from the module-level ``FLAGS`` and saves
    checkpoints under ``mnist_checkpoint``.
    """
    device = set_device(FLAGS.device)
    # Imperative (dygraph) execution is opt-in via the --dynamic flag.
    if FLAGS.dynamic:
        fluid.enable_dygraph(device)

    train_dataset = MnistDataset(mode='train')
    val_dataset = MnistDataset(mode='test')

    # Flattened 28x28 grayscale images with integer class labels.
    inputs = [Input([None, 784], 'float32', name='image')]
    labels = [Input([None, 1], 'int64', name='label')]

    model = MNIST()
    optim = Momentum(learning_rate=FLAGS.lr,
                     momentum=.9,
                     parameter_list=model.parameters())

    model.prepare(optim,
                  CrossEntropy(),
                  Accuracy(topk=(1, 2)),
                  inputs,
                  labels,
                  device=FLAGS.device)

    # Resume training from an earlier checkpoint when one is given.
    if FLAGS.resume is not None:
        model.load(FLAGS.resume)

    model.fit(train_dataset,
              val_dataset,
              epochs=FLAGS.epoch,
              batch_size=FLAGS.batch_size,
              save_dir='mnist_checkpoint')
Ejemplo n.º 2
0
def train():
    """Train a sentiment-classification model selected by ``args.model_type``.

    Relies on module-level ``args``, ``device`` and ``dev_count`` being
    initialized before the call. Checkpoints are written to
    ``args.checkpoints`` by ``model.fit``.

    Raises:
        ValueError: if ``args.model_type`` is not one of the known networks.
    """
    fluid.enable_dygraph(device)
    processor = SentaProcessor(data_dir=args.data_dir,
                               vocab_path=args.vocab_path,
                               random_seed=args.random_seed)
    # NOTE(review): num_labels and max_train_steps are computed but never
    # used below; kept to preserve any side effects of the processor calls.
    num_labels = len(processor.get_labels())

    num_train_examples = processor.get_num_examples(phase="train")

    max_train_steps = args.epoch * num_train_examples // args.batch_size // dev_count

    train_data_generator = processor.data_generator(
        batch_size=args.batch_size,
        padding_size=args.padding_size,
        places=device,
        phase='train',
        epoch=args.epoch,
        shuffle=False)

    eval_data_generator = processor.data_generator(
        batch_size=args.batch_size,
        padding_size=args.padding_size,
        places=device,
        phase='dev',
        epoch=args.epoch,
        shuffle=False)

    # Build the requested network. Fail fast on an unknown type instead of
    # hitting a NameError on `model` later (the original had no else branch).
    if args.model_type == 'cnn_net':
        model = CNN(args.vocab_size, args.batch_size, args.padding_size)
    elif args.model_type == 'bow_net':
        model = BOW(args.vocab_size, args.batch_size, args.padding_size)
    elif args.model_type == 'gru_net':
        model = GRU(args.vocab_size, args.batch_size, args.padding_size)
    elif args.model_type == 'bigru_net':
        model = BiGRU(args.vocab_size, args.batch_size, args.padding_size)
    else:
        raise ValueError("Unknown model type!")

    optimizer = fluid.optimizer.Adagrad(learning_rate=args.lr,
                                        parameter_list=model.parameters())

    # Variable-length token-id documents and integer labels.
    inputs = [Input([None, None], 'int64', name='doc')]
    labels = [Input([None, 1], 'int64', name='label')]

    model.prepare(optimizer,
                  CrossEntropy(),
                  Accuracy(topk=(1, )),
                  inputs,
                  labels,
                  device=device)

    model.fit(train_data=train_data_generator,
              eval_data=eval_data_generator,
              batch_size=args.batch_size,
              epochs=args.epoch,
              save_dir=args.checkpoints,
              eval_freq=args.eval_freq,
              save_freq=args.save_freq)
Ejemplo n.º 3
0
def main():
    """Train or (with --eval_only) evaluate the ImageNet model named by
    ``FLAGS.arch``, using the datasets under ``FLAGS.data``."""
    device = set_device(FLAGS.device)
    if FLAGS.dynamic:
        fluid.enable_dygraph(device)

    model_list = list(models.__dict__["__all__"])
    assert FLAGS.arch in model_list, "Expected FLAGS.arch in {}, but received {}".format(
        model_list, FLAGS.arch)
    # Pretrained weights are only fetched for eval runs that do not resume.
    use_pretrained = FLAGS.eval_only and not FLAGS.resume
    model = models.__dict__[FLAGS.arch](pretrained=use_pretrained)

    if FLAGS.resume is not None:
        model.load(FLAGS.resume)

    # Standard 224x224 RGB inputs with integer class labels.
    inputs = [Input([None, 3, 224, 224], 'float32', name='image')]
    labels = [Input([None, 1], 'int64', name='label')]

    train_dataset = ImageNetDataset(os.path.join(FLAGS.data, 'train'),
                                    mode='train',
                                    image_size=FLAGS.image_size,
                                    resize_short_size=FLAGS.resize_short_size)
    val_dataset = ImageNetDataset(os.path.join(FLAGS.data, 'val'),
                                  mode='val',
                                  image_size=FLAGS.image_size,
                                  resize_short_size=FLAGS.resize_short_size)

    # Per-rank steps per epoch drive the optimizer's LR schedule.
    steps_per_epoch = np.ceil(
        len(train_dataset) * 1. / FLAGS.batch_size / ParallelEnv().nranks)
    optim = make_optimizer(steps_per_epoch,
                           parameter_list=model.parameters())

    model.prepare(optim, CrossEntropy(), Accuracy(topk=(1, 5)), inputs, labels,
                  FLAGS.device)

    # Evaluation-only runs stop here.
    if FLAGS.eval_only:
        model.evaluate(val_dataset,
                       batch_size=FLAGS.batch_size,
                       num_workers=FLAGS.num_workers)
        return

    # One time-stamped output directory per run; only rank 0 creates it.
    run_stamp = time.strftime('%Y-%m-%d-%H-%M', time.localtime())
    output_dir = os.path.join(FLAGS.output_dir, FLAGS.arch, run_stamp)
    if ParallelEnv().local_rank == 0 and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    model.fit(train_dataset,
              val_dataset,
              batch_size=FLAGS.batch_size,
              epochs=FLAGS.epoch,
              save_dir=output_dir,
              num_workers=FLAGS.num_workers)
Ejemplo n.º 4
0
def infer():
    """Run inference with a trained sentiment model and, when
    ``args.output_dir`` is set, dump per-example predictions to
    ``<output_dir>/predictions.json`` (one JSON object per line).

    Relies on module-level ``args`` and ``device`` being initialized
    before the call.

    Raises:
        ValueError: if ``args.model_type`` is not one of the known networks.
    """
    fluid.enable_dygraph(device)
    processor = SentaProcessor(data_dir=args.data_dir,
                               vocab_path=args.vocab_path,
                               random_seed=args.random_seed)

    infer_data_generator = processor.data_generator(
        batch_size=args.batch_size,
        padding_size=args.padding_size,
        places=device,
        phase='infer',
        epoch=1,
        shuffle=False)

    # Fail fast on an unknown model type instead of hitting a NameError on
    # `model_infer` below (the original had no else branch).
    if args.model_type == 'cnn_net':
        model_infer = CNN(args.vocab_size, args.batch_size, args.padding_size)
    elif args.model_type == 'bow_net':
        model_infer = BOW(args.vocab_size, args.batch_size, args.padding_size)
    elif args.model_type == 'gru_net':
        model_infer = GRU(args.vocab_size, args.batch_size, args.padding_size)
    elif args.model_type == 'bigru_net':
        model_infer = BiGRU(args.vocab_size, args.batch_size,
                            args.padding_size)
    else:
        raise ValueError("Unknown model type!")

    print('Do inferring ...... ')
    inputs = [Input([None, None], 'int64', name='doc')]
    # No optimizer needed for pure inference; load weights only.
    model_infer.prepare(None,
                        CrossEntropy(),
                        Accuracy(topk=(1, )),
                        inputs,
                        device=device)
    model_infer.load(args.checkpoints, reset_optimizer=True)
    preds = model_infer.predict(test_data=infer_data_generator)
    # One output array; reshape to (N, 2) class scores.
    preds = np.array(preds[0]).reshape((-1, 2))

    if args.output_dir:
        out_path = os.path.join(args.output_dir, 'predictions.json')
        with open(out_path, 'w') as w:
            for p in range(len(preds)):
                # int(...) — np.argmax returns a numpy integer scalar, which
                # json.dumps cannot serialize (raises TypeError).
                label = int(np.argmax(preds[p]))
                result = json.dumps({
                    'index': p,
                    'label': label,
                    'probs': preds[p].tolist()
                })
                w.write(result + '\n')
        print('Predictions saved at ' + out_path)
Ejemplo n.º 5
0
    def fit(self, dynamic, is_mlp=False):
        """End-to-end smoke test on MNIST: train for two epochs, then check
        that predict() accuracy matches the evaluate() accuracy metric."""
        device = set_device('gpu')
        if dynamic:
            fluid.enable_dygraph(device)

        batch_size = 128
        # -1 in the image shape lets the batch dimension vary.
        inputs = [Input((-1, 784), 'float32', name='image')]
        labels = [Input([None, 1], 'int64', name='label')]

        train_dataset = MnistDataset(mode='train')
        val_dataset = MnistDataset(mode='test')
        test_dataset = TestMnistDataset()

        # Pick the network/loss pair under test.
        if is_mlp:
            model = MLP()
            loss = MyCrossEntropy()
        else:
            model = MNIST()
            loss = CrossEntropy()
        optim = fluid.optimizer.Momentum(learning_rate=0.01,
                                         momentum=.9,
                                         parameter_list=model.parameters())
        model.prepare(optim, loss, Accuracy(), inputs, labels, device=device)
        cbk = ProgBarLogger(50)

        model.fit(train_dataset,
                  val_dataset,
                  epochs=2,
                  batch_size=batch_size,
                  callbacks=cbk)

        eval_result = model.evaluate(val_dataset, batch_size=batch_size)
        output = model.predict(test_dataset, batch_size=batch_size)

        # predict() returns one array per output head; head 0 covers all rows.
        np.testing.assert_equal(output[0].shape[0], len(test_dataset))

        # Accuracy recomputed from raw predictions must match evaluate().
        acc = get_predict_accuracy(output[0], val_dataset.labels)
        np.testing.assert_allclose(acc, eval_result['acc'])
Ejemplo n.º 6
0
def main():
    """Train or (with --eval_only) evaluate a TSM ResNet-50 video classifier
    on the Kinetics data under ``FLAGS.data``."""
    device = set_device(FLAGS.device)
    if FLAGS.dynamic:
        fluid.enable_dygraph(device)

    # Training uses random multi-scale crops and flips; validation only
    # center-crops after rescaling.
    train_transform = Compose([
        GroupScale(),
        GroupMultiScaleCrop(),
        GroupRandomCrop(),
        GroupRandomFlip(),
        NormalizeImage(),
    ])
    val_transform = Compose([
        GroupScale(),
        GroupCenterCrop(),
        NormalizeImage(),
    ])

    train_dataset = KineticsDataset(
        file_list=os.path.join(FLAGS.data, 'train_10.list'),
        pickle_dir=os.path.join(FLAGS.data, 'train_10'),
        label_list=os.path.join(FLAGS.data, 'label_list'),
        transform=train_transform)
    val_dataset = KineticsDataset(
        file_list=os.path.join(FLAGS.data, 'val_10.list'),
        pickle_dir=os.path.join(FLAGS.data, 'val_10'),
        label_list=os.path.join(FLAGS.data, 'label_list'),
        mode='val',
        transform=val_transform)

    # Pretrained weights only make sense for eval runs without explicit weights.
    use_pretrained = FLAGS.eval_only and FLAGS.weights is None
    model = tsm_resnet50(num_classes=train_dataset.num_classes,
                         pretrained=use_pretrained)

    step_per_epoch = int(
        len(train_dataset) / FLAGS.batch_size / ParallelEnv().nranks)
    optim = make_optimizer(step_per_epoch, model.parameters())

    # Clips of 8 RGB frames at 224x224, with integer class labels.
    inputs = [Input([None, 8, 3, 224, 224], 'float32', name='image')]
    labels = [Input([None, 1], 'int64', name='label')]

    model.prepare(optim,
                  CrossEntropy(),
                  metrics=Accuracy(topk=(1, 5)),
                  inputs=inputs,
                  labels=labels,
                  device=FLAGS.device)

    # Evaluation-only runs load optional weights, evaluate and stop.
    if FLAGS.eval_only:
        if FLAGS.weights is not None:
            model.load(FLAGS.weights, reset_optimizer=True)
        model.evaluate(val_dataset,
                       batch_size=FLAGS.batch_size,
                       num_workers=FLAGS.num_workers)
        return

    if FLAGS.resume is not None:
        model.load(FLAGS.resume)

    model.fit(train_data=train_dataset,
              eval_data=val_dataset,
              epochs=FLAGS.epoch,
              batch_size=FLAGS.batch_size,
              save_dir='tsm_checkpoint',
              num_workers=FLAGS.num_workers,
              drop_last=True,
              shuffle=True)
Ejemplo n.º 7
0
def main():
    """
    Main Function

    Drives the EmoTect pipeline from ./config.yaml: depending on the
    do_train / do_val / do_infer flags it trains, evaluates, or runs
    inference with the model selected by ``args.model_type``.
    """
    args = Config(yaml_file='./config.yaml')
    args.build()
    args.Print()
    # At least one action flag must be set, or there is nothing to do.
    if not (args.do_train or args.do_val or args.do_infer):
        raise ValueError("For args `do_train`, `do_val` and `do_infer`, at "
                         "least one of them must be True.")

    place = set_device("gpu" if args.use_cuda else "cpu")
    fluid.enable_dygraph(place)

    processor = EmoTectProcessor(data_dir=args.data_dir,
                                 vocab_path=args.vocab_path,
                                 random_seed=args.random_seed)
    # NOTE(review): num_labels is never used below; presumably kept for
    # parity with a graph-mode variant — confirm before removing.
    num_labels = args.num_labels

    # Select the network; every branch but BiGRU takes (vocab_size, max_seq_len).
    if args.model_type == 'cnn_net':
        model = CNN(args.vocab_size, args.max_seq_len)
    elif args.model_type == 'bow_net':
        model = BOW(args.vocab_size, args.max_seq_len)
    elif args.model_type == 'lstm_net':
        model = LSTM(args.vocab_size, args.max_seq_len)
    elif args.model_type == 'gru_net':
        model = GRU(args.vocab_size, args.max_seq_len)
    elif args.model_type == 'bigru_net':
        model = BiGRU(args.vocab_size, args.batch_size, args.max_seq_len)
    else:
        raise ValueError("Unknown model type!")

    # Fixed-length token-id documents; optimizer/labels are only needed
    # when training and stay None otherwise (prepare() accepts None).
    inputs = [Input([None, args.max_seq_len], 'int64', name='doc')]
    optimizer = None
    labels = None

    if args.do_train:
        train_data_generator = processor.data_generator(
            batch_size=args.batch_size,
            places=place,
            phase='train',
            epoch=args.epoch,
            padding_size=args.max_seq_len)

        num_train_examples = processor.get_num_examples(phase="train")
        max_train_steps = args.epoch * num_train_examples // args.batch_size + 1

        print("Num train examples: %d" % num_train_examples)
        print("Max train steps: %d" % max_train_steps)

        labels = [Input([None, 1], 'int64', name='label')]
        optimizer = fluid.optimizer.Adagrad(learning_rate=args.lr,
                                            parameter_list=model.parameters())
        # Optional in-training evaluation uses the 'dev' split.
        test_data_generator = None
        if args.do_val:
            test_data_generator = processor.data_generator(
                batch_size=args.batch_size,
                phase='dev',
                epoch=1,
                places=place,
                padding_size=args.max_seq_len)

    elif args.do_val:
        # NOTE(review): standalone validation reads the 'test' split while
        # in-training validation above reads 'dev' — looks intentional, but
        # verify against the data layout.
        test_data_generator = processor.data_generator(
            batch_size=args.batch_size,
            phase='test',
            epoch=1,
            places=place,
            padding_size=args.max_seq_len)

    elif args.do_infer:
        infer_data_generator = processor.data_generator(
            batch_size=args.batch_size,
            phase='infer',
            epoch=1,
            places=place,
            padding_size=args.max_seq_len)

    model.prepare(optimizer,
                  CrossEntropy(),
                  Accuracy(topk=(1, )),
                  inputs,
                  labels,
                  device=place)

    # Checkpoint loading: optional when training, mandatory otherwise.
    if args.do_train:
        if args.init_checkpoint:
            model.load(args.init_checkpoint)
    elif args.do_val or args.do_infer:
        if not args.init_checkpoint:
            raise ValueError("args 'init_checkpoint' should be set if"
                             "only doing validation or infer!")
        model.load(args.init_checkpoint, reset_optimizer=True)

    if args.do_train:
        model.fit(train_data=train_data_generator,
                  eval_data=test_data_generator,
                  batch_size=args.batch_size,
                  epochs=args.epoch,
                  save_dir=args.checkpoints,
                  eval_freq=args.eval_freq,
                  save_freq=args.save_freq)
    elif args.do_val:
        eval_result = model.evaluate(eval_data=test_data_generator,
                                     batch_size=args.batch_size)
        print("Final eval result: acc: {:.4f}, loss: {:.4f}".format(
            eval_result['acc'], eval_result['loss'][0]))

    elif args.do_infer:
        preds = model.predict(test_data=infer_data_generator)
        preds = np.array(preds[0]).reshape((-1, args.num_labels))

        if args.output_dir:
            # One JSON object per line: index, argmax label, class probabilities.
            with open(os.path.join(args.output_dir, 'predictions.json'),
                      'w') as w:

                for p in range(len(preds)):
                    # NOTE(review): np.argmax returns a numpy integer, which
                    # json.dumps may reject (TypeError) — consider int(...).
                    label = np.argmax(preds[p])
                    result = json.dumps({
                        'index': p,
                        'label': label,
                        'probs': preds[p].tolist()
                    })
                    w.write(result + '\n')
            print('Predictions saved at ' +
                  os.path.join(args.output_dir, 'predictions.json'))