Example No. 1
def main(cfgpath):
    # parsing config.json
    proj_dir = Path.cwd()
    params = json.load((proj_dir / cfgpath).open())

    # create dataset
    batch_size = params['training'].get('batch_size')
    tr_filepath = params['filepath'].get('tr')
    val_filepath = params['filepath'].get('val')
    tst_filepath = params['filepath'].get('tst')

    tr_ds = create_dataset(tr_filepath, batch_size, False, False)
    val_ds = create_dataset(val_filepath, batch_size, False, False)
    tst_ds = create_dataset(tst_filepath, batch_size, False, False)

    # create pre_processor
    vocab = pickle.load(
        (proj_dir / params['filepath'].get('vocab')).open(mode='rb'))
    pre_processor = PreProcessor(vocab=vocab,
                                 tokenizer=MeCab().morphs,
                                 pad_idx=1)

    # create model
    model = SenCNN(num_classes=2, vocab=vocab)
    ckpt = tf.train.Checkpoint(model=model)
    ckpt.restore(save_path=tf.train.latest_checkpoint(proj_dir / 'checkpoint'))

    # evaluation
    tr_acc = get_accuracy(model, tr_ds, pre_processor.convert2idx)
    val_acc = get_accuracy(model, val_ds, pre_processor.convert2idx)
    tst_acc = get_accuracy(model, tst_ds, pre_processor.convert2idx)

    print('tr_acc: {:.2%}, val_acc : {:.2%}, tst_acc: {:.2%}'.format(
        tr_acc, val_acc, tst_acc))
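For reference, the config.json these snippets parse must contain at least the keys read above; a minimal sketch of the dictionary that json.load would return (all values shown are illustrative assumptions, not taken from the original project):

params = {
    'training': {'batch_size': 128, 'epochs': 10, 'learning_rate': 1e-3},
    'filepath': {'tr': 'data/train.txt', 'val': 'data/val.txt', 'tst': 'data/test.txt',
                 'vocab': 'data/vocab.pkl', 'ckpt': 'checkpoint/ckpt'},
}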
Example No. 2
def main(cfgpath, global_step):
    # parsing config.json
    proj_dir = Path.cwd()
    params = json.load((proj_dir / cfgpath).open())

    # create dataset
    batch_size = params['training'].get('batch_size')
    tr_filepath = params['filepath'].get('tr')
    val_filepath = params['filepath'].get('val')
    tr_ds = create_dataset(tr_filepath, batch_size, True)
    val_ds = create_dataset(val_filepath, batch_size, False)

    # create pre_processor
    vocab = pickle.load((proj_dir / params['filepath'].get('vocab')).open(mode='rb'))
    pre_processor = PreProcessor(vocab=vocab, tokenizer=MeCab().morphs, pad_idx=1)

    # create model
    model = SenCNN(num_classes=2, vocab=vocab)

    # create optimizer & loss_fn
    epochs = params['training'].get('epochs')
    learning_rate = params['training'].get('learning_rate')
    opt = tf.optimizers.Adam(learning_rate=learning_rate)
    loss_fn = tf.losses.SparseCategoricalCrossentropy(from_logits=True)
    writer = tf.summary.create_file_writer(logdir='./runs/exp')


    # training
    for epoch in tqdm(range(epochs), desc='epochs'):

        tr_loss = 0
        tf.keras.backend.set_learning_phase(1)

        for step, mb in tqdm(enumerate(tr_ds), desc='steps'):
            x_mb, y_mb = pre_processor.convert2idx(mb)

            with tf.GradientTape() as tape:
                mb_loss = loss_fn(y_mb, model(x_mb))
            grads = tape.gradient(target=mb_loss, sources=model.trainable_variables)
            opt.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))
            tr_loss += mb_loss.numpy()

            if tf.equal(opt.iterations % global_step, 0):
                with writer.as_default():
                    val_loss = evaluate(model, val_ds, loss_fn, pre_processor.convert2idx)
                    tf.summary.scalar('tr_loss', tr_loss / (step + 1), step=opt.iterations)
                    tf.summary.scalar('val_loss', val_loss, step=opt.iterations)
                    tf.keras.backend.set_learning_phase(1)
        else:
            tr_loss /= (step + 1)

        val_loss = evaluate(model, val_ds, loss_fn, pre_processor.convert2idx)
        tqdm.write('epoch : {}, tr_loss : {:.3f}, val_loss : {:.3f}'.format(epoch + 1, tr_loss, val_loss))

    ckpt_path = proj_dir / params['filepath'].get('ckpt')
    ckpt = tf.train.Checkpoint(model=model)
    ckpt.save(ckpt_path)
Example No. 3
def main(cfgpath):
    # parsing config.json
    proj_dir = Path.cwd()
    params = json.load((proj_dir / cfgpath).open())

    # create dataset
    batch_size = params['training'].get('batch_size')
    tr_filepath = params['filepath'].get('tr')
    val_filepath = params['filepath'].get('val')
    tr_ds = create_dataset(tr_filepath, batch_size, True)
    val_ds = create_dataset(val_filepath, batch_size, False)

    # create pre_processor
    vocab = pickle.load(
        (proj_dir / params['filepath'].get('vocab')).open(mode='rb'))
    pre_processor = PreProcessor(vocab=vocab, tokenizer=Okt())

    # create model
    model = SenCNN(num_classes=2, vocab=vocab)

    # create optimizer & loss_fn
    epochs = params['training'].get('epochs')
    learning_rate = params['training'].get('learning_rate')
    opt = tf.optimizers.Adam(learning_rate=learning_rate)
    loss_fn = tf.losses.SparseCategoricalCrossentropy(from_logits=True)

    # training

    for epoch in tqdm(range(epochs), desc='epochs'):
        tr_loss = 0
        tf.keras.backend.set_learning_phase(1)

        for step, mb in tqdm(enumerate(tr_ds), desc='steps'):
            x_mb, y_mb = pre_processor.convert2idx(mb)
            with tf.GradientTape() as tape:
                mb_loss = loss_fn(y_mb, model(x_mb))
            grads = tape.gradient(target=mb_loss,
                                  sources=model.trainable_variables)
            opt.apply_gradients(
                grads_and_vars=zip(grads, model.trainable_variables))
            tr_loss += mb_loss.numpy()
        else:
            tr_loss /= (step + 1)

        tf.keras.backend.set_learning_phase(0)
        val_loss = 0
        for step, mb in tqdm(enumerate(val_ds), desc='steps'):
            x_mb, y_mb = pre_processor.convert2idx(mb)
            mb_loss = loss_fn(y_mb, model(x_mb))
            val_loss += mb_loss.numpy()
        else:
            val_loss /= (step + 1)

        tqdm.write('epoch : {}, tr_loss : {:.3f}, val_loss : {:.3f}'.format(
            epoch + 1, tr_loss, val_loss))
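create_dataset itself is not included in these snippets; a minimal sketch consistent with the calls above, assuming line-per-example text files (as in the TextLineDataset examples further down) and that the extra boolean argument is drop_remainder:

import tensorflow as tf

def create_dataset(filepath, batch_size, shuffle=False, drop_remainder=False):
    # sketch only: one example per line, optional shuffling for training data
    ds = tf.data.TextLineDataset(str(filepath))
    if shuffle:
        ds = ds.shuffle(buffer_size=10000)
    return ds.batch(batch_size, drop_remainder=drop_remainder)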
Example No. 4
def main(argv):
    test_data = Path.cwd() / 'data_in' / 'test.txt'

    with open(Path.cwd() / 'data_in' / 'vocab.pkl', mode='rb') as io:
        vocab = pickle.load(io)

    test = tf.data.TextLineDataset(str(test_data)).batch(batch_size=FLAGS.batch_size)

    tokenized = Mecab()
    processing = Corpus(vocab=vocab, tokenizer=tokenized)

    # init params
    classes = FLAGS.classes
    max_length = FLAGS.length
    epochs = FLAGS.epochs
    batch_size = FLAGS.batch_size
    learning_rate = FLAGS.learning_rate

    # create model
    sen_cnn = SenCNN(vocab=vocab, classes=classes)

    # create optimizer & loss_fn
    opt = tf.optimizers.Adam(learning_rate=learning_rate)
    loss_fn = tf.losses.SparseCategoricalCrossentropy(from_logits=True)

    test_loss_metric = tf.keras.metrics.Mean(name='val_loss')
    test_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='val_accuracy')

    ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=sen_cnn)
    manager = tf.train.CheckpointManager(ckpt, './data_out/tf_ckpts', max_to_keep=3)
    ckpt.restore(manager.latest_checkpoint)

    if manager.latest_checkpoint:
        print("Restored from {}".format(manager.latest_checkpoint))
    else:
        print("Initializing from scratch.")

    tf.keras.backend.set_learning_phase(0)
    test_loss_metric.reset_states()
    test_acc_metric.reset_states()

    for step, val in enumerate(test):
        data, label = processing.token2idex(val)
        logits = sen_cnn(data)
        val_loss = loss_fn(label, logits)
        # val_loss += mb_loss.numpy()
        test_loss_metric.update_state(val_loss)
        test_acc_metric.update_state(label, logits)

    test_loss = test_loss_metric.result()

    tqdm.write(
        'test_acc : {:.3f}%, test_loss : {:.3f}'.format(test_acc_metric.result() * 100, test_loss))
Example No. 5
def main(args):
    dataset_config = Config(args.dataset_config)
    model_config = Config(args.model_config)
    exp_dir = Path("experiments") / model_config.type
    exp_dir = exp_dir.joinpath(
        f"epochs_{args.epochs}_batch_size_{args.batch_size}_learning_rate_{args.learning_rate}"
    )

    tokenizer = get_tokenizer(dataset_config, model_config)

    # model (restore)
    checkpoint_manager = CheckpointManager(exp_dir)
    checkpoint = checkpoint_manager.load_checkpoint("best.tar")
    model = SenCNN(num_classes=model_config.num_classes, vocab=tokenizer.vocab)
    model.load_state_dict(checkpoint["model_state_dict"])

    # evaluation
    summary_manager = SummaryManager(exp_dir)
    filepath = getattr(dataset_config, args.data)
    ds = Corpus(filepath, tokenizer.split_and_transform)
    dl = DataLoader(ds, batch_size=args.batch_size, num_workers=4)

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model.to(device)

    summary = evaluate(model, dl, {"loss": nn.CrossEntropyLoss(), "acc": acc}, device)

    summary_manager.load("summary.json")
    summary_manager.update({f"{args.data}": summary})
    summary_manager.save("summary.json")
    print(f"loss: {summary['loss']:.3f}, acc: {summary['acc']:.2%}")
Example No. 6
def main(json_path):
    cwd = Path.cwd()
    with open(cwd / json_path) as io:
        params = json.loads(io.read())

    # tokenizer
    vocab_path = params['filepath'].get('vocab')
    with open(cwd / vocab_path, mode='rb') as io:
        vocab = pickle.load(io)
    length = params['padder'].get('length')
    padder = PadSequence(length=length, pad_val=vocab.to_indices(vocab.padding_token))
    tokenizer = Tokenizer(vocab=vocab, split_fn=MeCab().morphs, pad_fn=padder)

    # model (restore)
    save_path = cwd / params['filepath'].get('ckpt')
    ckpt = torch.load(save_path)
    num_classes = params['model'].get('num_classes')
    model = SenCNN(num_classes=num_classes, vocab=tokenizer.vocab)
    model.load_state_dict(ckpt['model_state_dict'])

    # evaluation
    batch_size = params['training'].get('batch_size')
    tr_path = cwd / params['filepath'].get('tr')
    val_path = cwd / params['filepath'].get('val')
    tst_path = cwd / params['filepath'].get('tst')

    tr_ds = Corpus(tr_path, tokenizer.split_and_transform)
    tr_dl = DataLoader(tr_ds, batch_size=batch_size, num_workers=4)
    val_ds = Corpus(val_path, tokenizer.split_and_transform)
    val_dl = DataLoader(val_ds, batch_size=batch_size, num_workers=4)
    tst_ds = Corpus(tst_path, tokenizer.split_and_transform)
    tst_dl = DataLoader(tst_ds, batch_size=batch_size, num_workers=4)

    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    model.to(device)

    tr_acc = get_accuracy(model, tr_dl, device)
    val_acc = get_accuracy(model, val_dl, device)
    tst_acc = get_accuracy(model, tst_dl, device)

    print('tr_acc: {:.2%}, val_acc : {:.2%}, tst_acc: {:.2%}'.format(tr_acc, val_acc, tst_acc))
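get_accuracy is not shown in these snippets; a minimal sketch matching how it is called here, assuming the DataLoader yields (inputs, labels) pairs and the model returns logits:

import torch

def get_accuracy(model, data_loader, device):
    # sketch only: fraction of correctly classified examples over the loader
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for x_mb, y_mb in data_loader:
            x_mb, y_mb = x_mb.to(device), y_mb.to(device)
            correct += (model(x_mb).argmax(dim=-1) == y_mb).sum().item()
            total += y_mb.size(0)
    return correct / total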
Example No. 7
app.config.from_pyfile("config.py")
app.database = create_engine(app.config["DB_URL"],
                             encoding="utf-8",
                             max_overflow=0)

# preprocessor & model
num_classes = app.config["MODEL"]["num_classes"]
max_length = app.config["MODEL"]["length"]

with open("model/checkpoint/vocab.pkl", mode="rb") as io:
    vocab = pickle.load(io)
pad_sequence = PadSequence(length=max_length,
                           pad_val=vocab.to_indices(vocab.padding_token))
tokenizer = Tokenizer(vocab=vocab, split_fn=split_morphs, pad_fn=pad_sequence)

model = SenCNN(num_classes=app.config["MODEL"]["num_classes"], vocab=vocab)
ckpt = torch.load("model/checkpoint/best.tar",
                  map_location=torch.device("cpu"))
model.load_state_dict(ckpt["model_state_dict"])
model.eval()


@app.route("/alive_check", methods=["GET"])
def alive_check():
    return "alive", 200


@app.route("/inference", methods=["POST"])
def inference():
    payload = request.json
    sequence = payload.get("comment")
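    # NOTE: the original snippet is cut off here. A hedged sketch of how the
    # handler might continue, assuming the Tokenizer above exposes
    # split_and_transform (as in the other examples) and that flask's jsonify
    # is imported; the response keys and the softmax step are assumptions.
    indices = torch.tensor([tokenizer.split_and_transform(sequence)])
    with torch.no_grad():
        prob, label = torch.softmax(model(indices), dim=-1).max(dim=-1)
    return jsonify({"comment": sequence,
                    "label": label.item(),
                    "probability": prob.item()})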
Example No. 8
def main(args):
    dataset_config = Config(args.dataset_config)
    model_config = Config(args.model_config)

    exp_dir = Path("experiments") / model_config.type
    exp_dir = exp_dir.joinpath(
        f"epochs_{args.epochs}_batch_size_{args.batch_size}_learning_rate_{args.learning_rate}"
    )

    if not exp_dir.exists():
        exp_dir.mkdir(parents=True)

    if args.fix_seed:
        torch.manual_seed(777)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    tokenizer = get_tokenizer(dataset_config, model_config)
    tr_dl, val_dl = get_data_loaders(dataset_config, tokenizer, args.batch_size)
    model = SenCNN(num_classes=model_config.num_classes, vocab=tokenizer.vocab)

    loss_fn = nn.CrossEntropyLoss()
    opt = optim.Adam(params=model.parameters(), lr=args.learning_rate)
    scheduler = ReduceLROnPlateau(opt, patience=5)
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model.to(device)

    writer = SummaryWriter(f"{exp_dir}/runs")
    checkpoint_manager = CheckpointManager(exp_dir)
    summary_manager = SummaryManager(exp_dir)
    best_val_loss = 1e10

    for epoch in tqdm(range(args.epochs), desc="epochs"):

        tr_loss = 0
        tr_acc = 0

        model.train()
        for step, mb in tqdm(enumerate(tr_dl), desc="steps", total=len(tr_dl)):
            x_mb, y_mb = map(lambda elm: elm.to(device), mb)

            opt.zero_grad()
            y_hat_mb = model(x_mb)
            mb_loss = loss_fn(y_hat_mb, y_mb)
            mb_loss.backward()
            clip_grad_norm_(model._fc.weight, 5)
            opt.step()

            with torch.no_grad():
                mb_acc = acc(y_hat_mb, y_mb)

            tr_loss += mb_loss.item()
            tr_acc += mb_acc.item()

            if (epoch * len(tr_dl) + step) % args.summary_step == 0:
                val_loss = evaluate(model, val_dl, {"loss": loss_fn}, device)["loss"]
                writer.add_scalars("loss", {"train": tr_loss / (step + 1), "validation": val_loss},
                                   epoch * len(tr_dl) + step)
                model.train()
        else:
            tr_loss /= step + 1
            tr_acc /= step + 1

            tr_summary = {"loss": tr_loss, "acc": tr_acc}
            val_summary = evaluate(model, val_dl, {"loss": loss_fn, "acc": acc}, device)
            scheduler.step(val_summary["loss"])
            tqdm.write(f"epoch: {epoch+1}\n"
                       f"tr_loss: {tr_summary['loss']:.3f}, val_loss: {val_summary['loss']:.3f}\n"
                       f"tr_acc: {tr_summary['acc']:.2%}, val_acc: {val_summary['acc']:.2%}")

            val_loss = val_summary["loss"]
            is_best = val_loss < best_val_loss

            if is_best:
                state = {
                    "epoch": epoch + 1,
                    "model_state_dict": model.state_dict(),
                    "opt_state_dict": opt.state_dict(),
                }
                summary = {
                    "epoch": epoch + 1,
                    "train": tr_summary,
                    "validation": val_summary,
                }

                summary_manager.update(summary)
                summary_manager.save("summary.json")
                checkpoint_manager.save_checkpoint(state, "best.tar")

                best_val_loss = val_loss
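The evaluate helper used above is not part of the snippet; a minimal sketch consistent with its call signature here (a dict of metric functions, returning a dict of per-example averages), assuming each metric takes (logits, targets) like loss_fn and acc do:

import torch

def evaluate(model, data_loader, metrics, device):
    # sketch only: accumulate each metric weighted by batch size, then average
    model.eval()
    summary = {name: 0.0 for name in metrics}
    with torch.no_grad():
        for x_mb, y_mb in data_loader:
            x_mb, y_mb = x_mb.to(device), y_mb.to(device)
            y_hat_mb = model(x_mb)
            for name, metric_fn in metrics.items():
                summary[name] += metric_fn(y_hat_mb, y_mb).item() * y_mb.size(0)
    return {name: total / len(data_loader.dataset) for name, total in summary.items()}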
Example No. 9
def main():
    tr_filepath = Path.cwd() / 'data' / 'train.txt'
    val_filepath = Path.cwd() / 'data' / 'val.txt'

    with open(Path.cwd() / 'data/vocab.pkl', mode='rb') as f:
        vocab = pickle.load(f)

    tr_ds = create_dataset(str(tr_filepath), 128, shuffle=True)
    val_ds = create_dataset(str(val_filepath), 128,
                            shuffle=False)  # no shuffling for the validation data

    tokenized = Okt()
    pre_processor = PreProcessor(vocab=vocab, tokenizer=tokenized)

    # create model
    model = SenCNN(num_classes=2, vocab=vocab)

    # create optimizer & loss_fn
    epochs = 10
    learning_rate = 1e-3

    opt = tf.optimizers.Adam(learning_rate=learning_rate)
    loss_fn = tf.losses.SparseCategoricalCrossentropy(from_logits=True)

    # metrics
    tr_loss_metric = tf.keras.metrics.Mean(name='train_loss')
    tr_accuracy_metric = tf.keras.metrics.SparseCategoricalAccuracy(
        name='train_accuracy')
    val_loss_metric = tf.keras.metrics.Mean(name='validation_loss')
    val_accuracy_metric = tf.keras.metrics.SparseCategoricalAccuracy(
        name='validation_accuracy')

    # training

    for epoch in tqdm(range(epochs), desc='epochs'):
        # training data
        tf.keras.backend.set_learning_phase(1)  # train mode

        for _, mb in tqdm(enumerate(tr_ds), desc='steps'):
            x_mb, y_mb = pre_processor.convert2idx(mb)
            x_mb = pre_processor.pad_sequences(x_mb, 70)
            x_mb, y_mb = pre_processor.convert_to_tensor(x_mb, y_mb)

            with tf.GradientTape() as tape:
                mb_loss = loss_fn(y_mb, model(x_mb))
            grads = tape.gradient(target=mb_loss,
                                  sources=model.trainable_variables)
            opt.apply_gradients(
                grads_and_vars=zip(grads, model.trainable_variables))

            tr_loss_metric.update_state(mb_loss)
            tr_accuracy_metric(y_mb, model(x_mb))

        tr_mean_loss = tr_loss_metric.result()
        tr_mean_accuracy = tr_accuracy_metric.result()

        # validation data
        tf.keras.backend.set_learning_phase(0)  # test mode
        for _, mb in tqdm(enumerate(val_ds), desc='steps'):
            x_mb, y_mb = pre_processor.convert2idx(mb)
            x_mb = pre_processor.pad_sequences(x_mb, 70)
            x_mb, y_mb = pre_processor.convert_to_tensor(x_mb, y_mb)
            mb_loss = loss_fn(y_mb, model(x_mb))

            val_loss_metric.update_state(mb_loss)
            val_accuracy_metric.update_state(y_mb, model(x_mb))

        val_mean_loss = val_loss_metric.result()
        val_mean_accuracy = val_accuracy_metric.result()

        tqdm.write(
            'epoch : {}, tr_accuracy : {:.3f}, tr_loss : {:.3f}, val_accuracy : {:.3f}, val_loss : {:.3f}'
            .format(epoch + 1, tr_mean_accuracy, tr_mean_loss,
                    val_mean_accuracy, val_mean_loss))

    ckpt_path = Path.cwd() / 'checkpoint/ckpt'
    ckpt = tf.train.Checkpoint(model=model)
    ckpt.save(ckpt_path)
Example No. 10
    data_dir = Path(args.data_dir)
    model_dir = Path(args.model_dir)
    data_config = Config(json_path=data_dir / 'config.json')
    model_config = Config(json_path=model_dir / 'config.json')

    # tokenizer
    with open(data_config.vocab, mode='rb') as io:
        vocab = pickle.load(io)
    pad_sequence = PadSequence(length=model_config.length,
                               pad_val=vocab.to_indices(vocab.padding_token))
    tokenizer = Tokenizer(vocab=vocab,
                          split_fn=MeCab().morphs,
                          pad_fn=pad_sequence)

    # model
    model = SenCNN(num_classes=model_config.num_classes, vocab=tokenizer.vocab)

    # training
    tr_ds = Corpus(data_config.train, tokenizer.split_and_transform)
    tr_dl = DataLoader(tr_ds,
                       batch_size=model_config.batch_size,
                       shuffle=True,
                       num_workers=4,
                       drop_last=True)
    val_ds = Corpus(data_config.validation, tokenizer.split_and_transform)
    val_dl = DataLoader(val_ds, batch_size=model_config.batch_size)

    loss_fn = nn.CrossEntropyLoss()
    opt = optim.Adam(params=model.parameters(), lr=model_config.learning_rate)
    scheduler = ReduceLROnPlateau(opt, patience=5)
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
Example No. 11
    data_config = Config(json_path=data_dir / 'config.json')
    model_config = Config(json_path=model_dir / 'config.json')

    # tokenizer
    with open(data_config.vocab, mode='rb') as io:
        vocab = pickle.load(io)
    pad_sequence = PadSequence(length=model_config.length,
                               pad_val=vocab.to_indices(vocab.padding_token))
    tokenizer = Tokenizer(vocab=vocab,
                          split_fn=MeCab().morphs,
                          pad_fn=pad_sequence)

    # model (restore)
    checkpoint_manager = CheckpointManager(model_dir)
    checkpoint = checkpoint_manager.load_checkpoint(args.restore_file + '.tar')
    model = SenCNN(num_classes=model_config.num_classes, vocab=tokenizer.vocab)
    model.load_state_dict(checkpoint['model_state_dict'])

    # evaluation
    summary_manager = SummaryManager(model_dir)
    filepath = getattr(data_config, args.data_name)
    ds = Corpus(filepath, tokenizer.split_and_transform)
    dl = DataLoader(ds, batch_size=model_config.batch_size, num_workers=4)

    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    model.to(device)

    summary = evaluate(model, dl, {
        'loss': nn.CrossEntropyLoss(),
        'acc': acc
    }, device)
Example No. 12
def main(json_path):
    cwd = Path.cwd()
    with open(cwd / json_path) as io:
        params = json.loads(io.read())

    # tokenizer
    vocab_path = params['filepath'].get('vocab')
    with open(cwd / vocab_path, mode='rb') as io:
        vocab = pickle.load(io)
    length = params['padder'].get('length')
    padder = PadSequence(length=length, pad_val=vocab.to_indices(vocab.padding_token))
    tokenizer = Tokenizer(vocab=vocab, split_fn=MeCab().morphs, pad_fn=padder)

    # model
    num_classes = params['model'].get('num_classes')
    model = SenCNN(num_classes=num_classes, vocab=tokenizer.vocab)

    # training
    epochs = params['training'].get('epochs')
    batch_size = params['training'].get('batch_size')
    learning_rate = params['training'].get('learning_rate')
    global_step = params['training'].get('global_step')

    tr_path = cwd / params['filepath'].get('tr')
    val_path = cwd / params['filepath'].get('val')
    tr_ds = Corpus(tr_path, tokenizer.split_and_transform)
    tr_dl = DataLoader(tr_ds, batch_size=batch_size, shuffle=True, num_workers=4, drop_last=True)
    val_ds = Corpus(val_path, tokenizer.split_and_transform)
    val_dl = DataLoader(val_ds, batch_size=batch_size)

    loss_fn = nn.CrossEntropyLoss()
    opt = optim.Adam(params=model.parameters(), lr=learning_rate)
    scheduler = ReduceLROnPlateau(opt, patience=5)
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    model.to(device)

    writer = SummaryWriter('./runs/{}'.format(params['version']))
    for epoch in tqdm(range(epochs), desc='epochs'):

        tr_loss = 0

        model.train()
        for step, mb in tqdm(enumerate(tr_dl), desc='steps', total=len(tr_dl)):
            x_mb, y_mb = map(lambda elm: elm.to(device), mb)

            opt.zero_grad()
            mb_loss = loss_fn(model(x_mb), y_mb)
            mb_loss.backward()
            clip_grad_norm_(model._fc.weight, 5)
            opt.step()

            tr_loss += mb_loss.item()

            if (epoch * len(tr_dl) + step) % global_step == 0:
                val_loss = evaluate(model, val_dl, loss_fn, device)
                writer.add_scalars('loss', {'train': tr_loss / (step + 1),
                                            'val': val_loss}, epoch * len(tr_dl) + step)

                model.train()
        else:
            tr_loss /= (step + 1)

        val_loss = evaluate(model, val_dl, loss_fn, device)
        scheduler.step(val_loss)
        tqdm.write('epoch : {}, tr_loss : {:.3f}, val_loss : {:.3f}'.format(epoch + 1, tr_loss, val_loss))

    ckpt = {'model_state_dict': model.state_dict(),
            'opt_state_dict': opt.state_dict()}

    save_path = cwd / params['filepath'].get('ckpt')
    torch.save(ckpt, save_path)
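Here evaluate is called with a single loss_fn and returns a scalar; a minimal sketch of that simpler variant, under the same assumption that the loader yields (inputs, labels) pairs:

import torch

def evaluate(model, data_loader, loss_fn, device):
    # sketch only: mean mini-batch loss over the validation loader
    model.eval()
    total_loss = 0.0
    with torch.no_grad():
        for x_mb, y_mb in data_loader:
            x_mb, y_mb = x_mb.to(device), y_mb.to(device)
            total_loss += loss_fn(model(x_mb), y_mb).item()
    return total_loss / len(data_loader)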
Example No. 13
def main(argv):
    train_data = Path.cwd() / 'data_in' / 'train.txt'
    val_data = Path.cwd() / 'data_in' / 'val.txt'

    with open(Path.cwd() / 'data_in' / 'vocab.pkl', mode='rb') as io:
        vocab = pickle.load(io)

    train = tf.data.TextLineDataset(str(train_data)).shuffle(buffer_size=1000).batch(batch_size=FLAGS.batch_size,
                                                                                     drop_remainder=True)
    eval_ds = tf.data.TextLineDataset(str(val_data)).batch(batch_size=FLAGS.batch_size, drop_remainder=True)

    tokenized = MeCab()
    processing = Corpus(vocab=vocab, tokenizer=tokenized)

    # init params
    classes = FLAGS.classes
    max_length = FLAGS.length
    epochs = FLAGS.epochs
    learning_rate = FLAGS.learning_rate
    global_step = 1000

    # create model
    sen_cnn = SenCNN(vocab=vocab, classes=classes)

    # create optimizer & loss_fn
    opt = tf.optimizers.Adam(learning_rate=learning_rate)
    loss_fn = tf.losses.SparseCategoricalCrossentropy(from_logits=True)

    train_loss_metric = tf.keras.metrics.Mean(name='train_loss')
    train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
    val_loss_metric = tf.keras.metrics.Mean(name='val_loss')
    val_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='val_accuracy')

    train_summary_writer = tf.summary.create_file_writer('./data_out/summaries/train')
    eval_summary_writer = tf.summary.create_file_writer('./data_out/summaries/eval')

    ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=sen_cnn)
    manager = tf.train.CheckpointManager(ckpt, './data_out/tf_ckpts', max_to_keep=3)
    ckpt.restore(manager.latest_checkpoint)

    if manager.latest_checkpoint:
        print("Restored from {}".format(manager.latest_checkpoint))
    else:
        print("Initializing from scratch.")

    # training
    for epoch in tqdm(range(epochs), desc='epochs'):

        train_loss_metric.reset_states()
        train_acc_metric.reset_states()
        val_loss_metric.reset_states()
        val_acc_metric.reset_states()
        tf.keras.backend.set_learning_phase(1)

        tr_loss = 0
        with train_summary_writer.as_default():
            for step, val in tqdm(enumerate(train), desc='steps'):
                data, label = processing.token2idex(val)
                with tf.GradientTape() as tape:
                    logits = sen_cnn(data)
                    train_loss = loss_fn(label, logits)
                ckpt.step.assign_add(1)
                grads = tape.gradient(target=train_loss, sources=sen_cnn.trainable_variables)
                opt.apply_gradients(grads_and_vars=zip(grads, sen_cnn.trainable_variables))
                # tr_loss += pred_loss.numpy()

                train_loss_metric.update_state(train_loss)
                train_acc_metric.update_state(label, logits)

                if tf.equal(opt.iterations % global_step, 0):
                    tf.summary.scalar('loss', train_loss_metric.result(), step=opt.iterations)

        # else:
        # tr_loss /= (step + 1)
        # print("t_loss {}".format(tr_loss))
        tr_loss = train_loss_metric.result()
        save_path = manager.save()
        print(save_path)

        tf.keras.backend.set_learning_phase(0)

        val_loss = 0
        with eval_summary_writer.as_default():
            for step, val in tqdm(enumerate(eval_ds), desc='steps'):
                data, label = processing.token2idex(val)
                logits = sen_cnn(data)
                val_loss = loss_fn(label, logits)
                # val_loss += mb_loss.numpy()
                val_loss_metric.update_state(val_loss)
                val_acc_metric.update_state(label, logits)
                tf.summary.scalar('loss', val_loss_metric.result(), step=step)

        val_loss = val_loss_metric.result()

        tqdm.write(
            'epoch : {}, tr_acc : {:.3f}%, tr_loss : {:.3f}, val_acc : {:.3f}%, val_loss : {:.3f}'.format(epoch + 1,
                                                                                                          train_acc_metric.result() * 100,
                                                                                                          tr_loss,
                                                                                                          val_acc_metric.result() * 100,
                                                                                                          val_loss))