Example #1
import json
import pickle
from pathlib import Path

import tensorflow as tf
from konlpy.tag import Okt
from tqdm import tqdm

# create_dataset, PreProcessor, and SenCNN are project-local helpers;
# a plausible sketch of create_dataset follows this example.


def main(cfgpath):
    # parse config.json
    proj_dir = Path.cwd()
    params = json.load((proj_dir / cfgpath).open())

    # create dataset
    batch_size = params['training'].get('batch_size')
    tr_filepath = params['filepath'].get('tr')
    val_filepath = params['filepath'].get('val')
    tr_ds = create_dataset(tr_filepath, batch_size, True)
    val_ds = create_dataset(val_filepath, batch_size, False)

    # create pre_processor
    vocab = pickle.load(
        (proj_dir / params['filepath'].get('vocab')).open(mode='rb'))
    pre_processor = PreProcessor(vocab=vocab, tokenizer=Okt())

    # create model
    model = SenCNN(num_classes=2, vocab=vocab)

    # create optimizer & loss_fn
    epochs = params['training'].get('epochs')
    learning_rate = params['training'].get('learning_rate')
    opt = tf.optimizers.Adam(learning_rate=learning_rate)
    loss_fn = tf.losses.SparseCategoricalCrossentropy()

    # training
    for epoch in tqdm(range(epochs), desc='epochs'):
        tr_loss = 0
        tf.keras.backend.set_learning_phase(1)  # train mode (enables dropout)

        for step, mb in tqdm(enumerate(tr_ds), desc='steps'):
            x_mb, y_mb = pre_processor.convert2idx(mb)
            with tf.GradientTape() as tape:
                mb_loss = loss_fn(y_mb, model(x_mb))
            grads = tape.gradient(target=mb_loss,
                                  sources=model.trainable_variables)
            opt.apply_gradients(
                grads_and_vars=zip(grads, model.trainable_variables))
            tr_loss += mb_loss.numpy()
        tr_loss /= (step + 1)

        tf.keras.backend.set_learning_phase(0)  # inference mode
        val_loss = 0
        for step, mb in tqdm(enumerate(val_ds), desc='steps'):
            x_mb, y_mb = pre_processor.convert2idx(mb)
            mb_loss = loss_fn(y_mb, model(x_mb))
            val_loss += mb_loss.numpy()
        val_loss /= (step + 1)

        tqdm.write('epoch : {}, tr_loss : {:.3f}, val_loss : {:.3f}'.format(
            epoch + 1, tr_loss, val_loss))
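
All four examples call a project-local create_dataset helper that is not shown on this page. As a rough sketch of what the call sites imply (a tf.data pipeline over a text file, optional shuffling, a fixed batch size), something like the following would fit; the line format and the shuffle buffer size are assumptions, not the original project's code:

import tensorflow as tf


def create_dataset(filepath, batch_size, shuffle=True, buffer_size=10000):
    # each line is assumed to be one "sentence\tlabel" record
    ds = tf.data.TextLineDataset(filepath)
    if shuffle:
        ds = ds.shuffle(buffer_size=buffer_size)  # buffer size is a guess
    return ds.batch(batch_size)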
Example #2
    # NOTE: main is a method of a training wrapper class; tensorflow as tf,
    # pandas as pd, tqdm, and konlpy.tag.Mecab are assumed to be imported at
    # module level, and the class is assumed to define create_dataset.
    def main(self):
        batch_size = self._batch_size
        tr_filepath = 'data/train.txt'
        val_filepath = 'data/val.txt'
        tr_ds = self.create_dataset(tr_filepath, batch_size, shuffle=True)
        val_ds = self.create_dataset(val_filepath, batch_size, shuffle=False)

        vocab = pd.read_pickle('data/vocab.pkl')
        pre_processor = PreProcessor(vocab=vocab, tokenizer=Mecab())

        # create model
        model = SmCnn()

        # create optimizer & loss_fn
        epochs = self._epochs
        learning_rate = self._learning_rate
        opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()

        # training
        for epoch in tqdm(range(epochs), desc='epochs'):
            tr_loss = 0
            tf.keras.backend.set_learning_phase(1)

            for step, mb in tqdm(enumerate(tr_ds), desc='steps'):
                x_mb, y_mb = pre_processor.convert2idx(mb)
                with tf.GradientTape() as tape:
                    mb_loss = loss_fn(y_mb, model(x_mb))
                grads = tape.gradient(target=mb_loss,
                                      sources=model.trainable_variables)
                opt.apply_gradients(
                    grads_and_vars=zip(grads, model.trainable_variables))
                tr_loss += mb_loss.numpy()
            tr_loss /= (step + 1)

            tf.keras.backend.set_learning_phase(0)
            val_loss = 0
            for step, mb in tqdm(enumerate(val_ds), desc='steps'):
                x_mb, y_mb = pre_processor.convert2idx(mb)
                mb_loss = loss_fn(y_mb, model(x_mb))
                val_loss += mb_loss.numpy()
            val_loss /= (step + 1)

            tqdm.write(
                'epoch : {}, tr_loss : {:.3f}, val_loss : {:.3f}'.format(
                    epoch + 1, tr_loss, val_loss))
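
Every example routes raw minibatches through pre_processor.convert2idx before the model sees them, but the project-local PreProcessor is not shown. Below is a minimal sketch of what the call sites imply, assuming tab-separated "sentence<TAB>label" records, a vocab with a gluonnlp-style to_indices lookup, and a callable tokenizer such as Mecab().morphs (the convention of Example #3; Examples #1, #2, and #4 pass a tokenizer instance instead). Example #4 splits padding and tensor conversion into separate pad_sequences / convert_to_tensor calls; here they are folded into convert2idx as Examples #1 through #3 imply. All names in the body are assumptions:

import tensorflow as tf


class PreProcessor:
    def __init__(self, vocab, tokenizer, pad_idx=1, max_len=70):
        # pad_idx=1 mirrors the value Example #3 passes explicitly
        self._vocab = vocab
        self._tokenizer = tokenizer
        self._pad_idx = pad_idx
        self._max_len = max_len

    def convert2idx(self, minibatch):
        # minibatch: a batched string tensor of "sentence\tlabel" records
        sentences, labels = [], []
        for record in minibatch.numpy():
            sentence, label = record.decode('utf-8').split('\t')
            tokens = self._tokenizer(sentence)  # e.g. Mecab().morphs
            indices = [self._vocab.to_indices(token) for token in tokens]
            indices = indices[:self._max_len]  # truncate ...
            indices += [self._pad_idx] * (self._max_len - len(indices))  # ... and pad
            sentences.append(indices)
            labels.append(int(label))
        return tf.convert_to_tensor(sentences), tf.convert_to_tensor(labels)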
Example #3
import json
import pickle
from pathlib import Path

import tensorflow as tf
from konlpy.tag import Mecab
from tqdm import tqdm

# create_dataset, PreProcessor, SenCNN, and evaluate are project-local helpers;
# a plausible sketch of evaluate follows this example.


def main(cfgpath, global_step):
    # parse config.json
    proj_dir = Path.cwd()
    params = json.load((proj_dir / cfgpath).open())

    # create dataset
    batch_size = params['training'].get('batch_size')
    tr_filepath = params['filepath'].get('tr')
    val_filepath = params['filepath'].get('val')
    tr_ds = create_dataset(tr_filepath, batch_size, True)
    val_ds = create_dataset(val_filepath, batch_size, False)

    # create pre_processor
    vocab = pickle.load((proj_dir / params['filepath'].get('vocab')).open(mode='rb'))
    pre_processor = PreProcessor(vocab=vocab, tokenizer=Mecab().morphs, pad_idx=1)

    # create model
    model = SenCNN(num_classes=2, vocab=vocab)

    # create optimizer & loss_fn
    epochs = params['training'].get('epochs')
    learning_rate = params['training'].get('learning_rate')
    opt = tf.optimizers.Adam(learning_rate=learning_rate)
    loss_fn = tf.losses.SparseCategoricalCrossentropy(from_logits=True)
    writer = tf.summary.create_file_writer(logdir='./runs/exp')

    # training
    for epoch in tqdm(range(epochs), desc='epochs'):
        tr_loss = 0
        tf.keras.backend.set_learning_phase(1)

        for step, mb in tqdm(enumerate(tr_ds), desc='steps'):
            x_mb, y_mb = pre_processor.convert2idx(mb)

            with tf.GradientTape() as tape:
                mb_loss = loss_fn(y_mb, model(x_mb))
            grads = tape.gradient(target=mb_loss, sources=model.trainable_variables)
            opt.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))
            tr_loss += mb_loss.numpy()

            # every global_step optimizer updates, evaluate and log to TensorBoard
            if tf.equal(opt.iterations % global_step, 0):
                with writer.as_default():
                    val_loss = evaluate(model, val_ds, loss_fn, pre_processor.convert2idx)
                    tf.summary.scalar('tr_loss', tr_loss / (step + 1), step=opt.iterations)
                    tf.summary.scalar('val_loss', val_loss, step=opt.iterations)
                    tf.keras.backend.set_learning_phase(1)  # restore train mode after evaluate
        tr_loss /= (step + 1)

        val_loss = evaluate(model, val_ds, loss_fn, pre_processor.convert2idx)
        tqdm.write('epoch : {}, tr_loss : {:.3f}, val_loss : {:.3f}'.format(epoch + 1, tr_loss, val_loss))

    ckpt_path = proj_dir / params['filepath'].get('ckpt')
    ckpt = tf.train.Checkpoint(model=model)
    ckpt.save(str(ckpt_path))
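
Example #3 additionally relies on a project-local evaluate helper, both for the periodic TensorBoard logging and for the end-of-epoch report. A minimal sketch consistent with its call site, evaluate(model, val_ds, loss_fn, pre_processor.convert2idx), is shown below; the body is an assumption, not the project's actual code, and it also explains why the training loop switches back to set_learning_phase(1) right after calling it:

import tensorflow as tf


def evaluate(model, dataset, loss_fn, convert_fn):
    tf.keras.backend.set_learning_phase(0)  # inference mode (disables dropout)
    total_loss, num_batches = 0.0, 0
    for mb in dataset:
        x_mb, y_mb = convert_fn(mb)
        total_loss += loss_fn(y_mb, model(x_mb)).numpy()
        num_batches += 1
    return total_loss / num_batches  # mean validation loss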
Example #4
import pickle
from pathlib import Path

import tensorflow as tf
from konlpy.tag import Okt
from tqdm import tqdm

# create_dataset, PreProcessor, and SenCNN are project-local helpers;
# a plausible sketch of SenCNN follows this example.


def main():
    tr_filepath = Path.cwd() / 'data' / 'train.txt'
    val_filepath = Path.cwd() / 'data' / 'val.txt'

    with open(Path.cwd() / 'data/vocab.pkl', mode='rb') as f:
        vocab = pickle.load(f)

    tr_ds = create_dataset(str(tr_filepath), 128, shuffle=True)
    val_ds = create_dataset(str(val_filepath), 128,
                            shuffle=False)  # no shuffling for the validation data

    tokenizer = Okt()
    pre_processor = PreProcessor(vocab=vocab, tokenizer=tokenizer)

    # create model
    model = SenCNN(num_classes=2, vocab=vocab)

    # create optimizer & loss_fn
    epochs = 10
    learning_rate = 1e-3

    opt = tf.optimizers.Adam(learning_rate=learning_rate)
    loss_fn = tf.losses.SparseCategoricalCrossentropy(from_logits=True)

    # metrics
    tr_loss_metric = tf.keras.metrics.Mean(name='train_loss')
    tr_accuracy_metric = tf.keras.metrics.SparseCategoricalAccuracy(
        name='train_accuracy')
    val_loss_metric = tf.keras.metrics.Mean(name='validation_loss')
    val_accuracy_metric = tf.keras.metrics.SparseCategoricalAccuracy(
        name='validation_accuracy')

    # training
    for epoch in tqdm(range(epochs), desc='epochs'):
        # training data
        tf.keras.backend.set_learning_phase(1)  # train mode

        for mb in tqdm(tr_ds, desc='steps'):
            x_mb, y_mb = pre_processor.convert2idx(mb)
            x_mb = pre_processor.pad_sequences(x_mb, 70)
            x_mb, y_mb = pre_processor.convert_to_tensor(x_mb, y_mb)

            with tf.GradientTape() as tape:
                preds = model(x_mb)
                mb_loss = loss_fn(y_mb, preds)
            grads = tape.gradient(target=mb_loss,
                                  sources=model.trainable_variables)
            opt.apply_gradients(
                grads_and_vars=zip(grads, model.trainable_variables))

            tr_loss_metric.update_state(mb_loss)
            tr_accuracy_metric.update_state(y_mb, preds)  # reuse the forward pass

        tr_mean_loss = tr_loss_metric.result()
        tr_mean_accuracy = tr_accuracy_metric.result()
        tr_loss_metric.reset_states()  # reset so each epoch reports its own mean
        tr_accuracy_metric.reset_states()

        # test data
        tf.keras.backend.set_learning_phase(0)  # test mode
        for mb in tqdm(val_ds, desc='steps'):
            x_mb, y_mb = pre_processor.convert2idx(mb)
            x_mb = pre_processor.pad_sequences(x_mb, 70)
            x_mb, y_mb = pre_processor.convert_to_tensor(x_mb, y_mb)
            preds = model(x_mb)
            mb_loss = loss_fn(y_mb, preds)

            val_loss_metric.update_state(mb_loss)
            val_accuracy_metric.update_state(y_mb, preds)  # reuse the forward pass

        val_mean_loss = val_loss_metric.result()
        val_mean_accuracy = val_accuracy_metric.result()
        val_loss_metric.reset_states()  # reset so each epoch reports its own mean
        val_accuracy_metric.reset_states()

        tqdm.write(
            'epoch : {}, tr_accuracy : {:.3f}, tr_loss : {:.3f}, val_accuracy : {:.3f}, val_loss : {:.3f}'
            .format(epoch + 1, tr_mean_accuracy, tr_mean_loss,
                    val_mean_accuracy, val_mean_loss))

    ckpt_path = Path.cwd() / 'checkpoint/ckpt'
    ckpt = tf.train.Checkpoint(model=model)
    ckpt.save(str(ckpt_path))
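
All four examples train the same kind of model, a SenCNN (SmCnn in Example #2) sentence classifier. For context, here is a hedged sketch of a Kim (2014)-style convolutional sentence classifier that matches the call sites, with model(x_mb) returning unnormalized logits as Examples #3 and #4 assume via from_logits=True; the embedding size, filter widths, filter count, and dropout rate are illustrative guesses, not the project's hyperparameters:

import tensorflow as tf


class SenCNN(tf.keras.Model):
    def __init__(self, num_classes, vocab, embed_dim=128):
        super().__init__()
        # len(vocab) assumes the vocab object supports len(), as gluonnlp's Vocab does
        self._embedding = tf.keras.layers.Embedding(len(vocab), embed_dim)
        self._convs = [tf.keras.layers.Conv1D(100, k, activation='relu')
                       for k in (3, 4, 5)]  # tri-/four-/five-gram filters
        self._pool = tf.keras.layers.GlobalMaxPooling1D()
        self._dropout = tf.keras.layers.Dropout(0.5)
        self._fc = tf.keras.layers.Dense(num_classes)  # logits, no softmax

    def call(self, x):
        embedded = self._embedding(x)
        features = tf.concat(
            [self._pool(conv(embedded)) for conv in self._convs], axis=-1)
        return self._fc(self._dropout(features))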