Example #1
                    (loss, loss_rec, loss_adv, _, loss_rec_cyc,
                     loss_vae) = sess.run(
                        [
                            model.loss, model.loss_rec, model.loss_adv,
                            optimize, model.loss_rec_cyc, model.kld_loss
                        ],
                        feed_dict=feed_dict)
                    losses.add([
                        loss, loss_rec, loss_adv, loss_d0, loss_d1,
                        loss_rec_cyc, loss_vae
                    ])

                    # grad_rec, grad_adv, grad = sess.run([model.grad_rec_norm,
                    #    model.grad_adv_norm, model.grad_norm],
                    #    feed_dict=feed_dict)
                    # gradients.add([grad_rec, grad_adv, grad])

                    step += 1
                    if step % args.steps_per_checkpoint == 0:
                        losses.output('step %d, time %.0fs,' %
                                      (step, time.time() - start_time))
                        losses.clear()

                        checkpoint_path = os.path.join(args.model,
                                                       'model.ckpt')
                        model.saver.save(sess, checkpoint_path,
                                         global_step=step)
                        print('\t\tModel saved to {}'.format(checkpoint_path))
                        # gradients.output()
                        # gradients.clear()

                if args.dev:
                    dev_losses = transfer(model, decoder, sess, args, vocab,
                                          dev0, dev1,
                                          args.output + '.epoch%d' % epoch)
                    dev_losses.output('dev')
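
The examples on this page lean on an `Accumulator` helper (`add`, `output`, `clear`, plus a `.values` list) that is not shown here. The sketch below is a minimal assumed implementation matching those calls; the constructor signature and the averaging details are assumptions, not code from the original repositories.

class Accumulator(object):
    """Minimal sketch of the loss-averaging helper the examples assume."""

    def __init__(self, names, div=1):
        self.names = names          # printed labels, one per tracked loss
        self.div = max(div, 1)      # divisor, e.g. batches per report
        self.values = [0.0] * len(names)

    def add(self, values):
        # accumulate one batch of losses (same order as self.names);
        # pre-dividing keeps self.values directly comparable, as in
        # the `dev_losses.values[0] < best_dev` checks further down
        for i, v in enumerate(values):
            self.values[i] += v / self.div

    def output(self, prefix=''):
        # e.g. "step 100, time 42s, loss 1.23, loss_rec 0.98, ..."
        print(prefix + ' ' + ', '.join(
            '%s %.2f' % (n, v) for n, v in zip(self.names, self.values)))

    def clear(self):
        self.values = [0.0] * len(self.names)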
Example #2
def run(sess, batch_gen, dataset, model, params):

    S_batches, T_batches = batch_gen
    train_set, val_set, tar_un_set, test_set = dataset

    n_train = len(train_set['lm_labels'])
    # integer division: batch_num is used as a range() bound below
    batch_num = n_train // params.batch_size
    print('training examples: %d, batches per epoch: %d' % (n_train, batch_num))

    best_val_ote_score, best_val_ts_score = -999.0, -999.0
    best_epoch = -1

    start_time = time.time()
    losses = Accumulator(
        ['loss', 'asp_loss', 'ts_loss', 'opn_loss', 'ote_transfer_loss'],
        batch_num)

    for epoch in range(params.n_epoch):

        cur_lr = params.lr

        for i in range(batch_num):

            xs, win_xs, length_s, ys_ote, ys_ts, ys_opn, ys_stm, _, _ = \
                S_batches.next()
            xt, win_xt, length_t, yt_ote, yt_ts, yt_opn, yt_stm, _, _ = \
                T_batches.next()
            x = np.vstack([xs, xt])
            win_x = np.vstack([win_xs, win_xt])
            length = np.hstack([length_s, length_t])
            y_ote = np.vstack([ys_ote, yt_ote])
            y_ts = np.vstack([ys_ts, yt_ts])
            y_opn = np.vstack([ys_opn, yt_opn])
            y_stm = np.vstack([ys_stm, yt_stm])

            feed_dict = get_train_feed_dict(model,
                                            x,
                                            win_x,
                                            length,
                                            y_ote,
                                            y_ts,
                                            y_opn,
                                            y_stm,
                                            cur_lr,
                                            params.dropout_rate,
                                            train_flag=True)

            _, loss, asp_loss, ts_loss, opn_loss = sess.run(
                [
                    model.train_op, model.loss, model.asp_loss, model.ts_loss,
                    model.opn_loss
                ],
                feed_dict=feed_dict)
            _, ote_transfer_loss = sess.run(
                [model.ote_transfer_op, model.ote_transfer_loss],
                feed_dict=feed_dict)
            losses.add([loss, asp_loss, ts_loss, opn_loss, ote_transfer_loss])

        if epoch % params.evaluation_interval == 0:

            print('--------------------epoch %d--------------------' %
                  (epoch + 1))
            print('learning_rate:', cur_lr)

            losses.output('time %.5fs,' % (time.time() - start_time))
            losses.clear()

            train_ote_scores, train_ts_scores, _, _ = eval_metric(
                sess, model, params, train_set, domain_flag=True)
            train_ote_p, train_ote_r, train_ote_f1 = train_ote_scores
            train_ts_macro_f1, train_ts_micro_p, train_ts_micro_r, train_ts_micro_f1 = train_ts_scores

            print(
                "train performance: ote: precision: %.4f, recall: %.4f, f1: %.4f, ts: precision: %.4f, recall: %.4f, micro-f1: %.4f"
                % (train_ote_p, train_ote_r, train_ote_f1, train_ts_micro_p,
                   train_ts_micro_r, train_ts_micro_f1))

            val_ote_scores, val_ts_scores, _, _ = eval_metric(sess,
                                                              model,
                                                              params,
                                                              val_set,
                                                              domain_flag=True)
            val_ote_p, val_ote_r, val_ote_f1 = val_ote_scores
            val_ts_macro_f1, val_ts_micro_p, val_ts_micro_r, val_ts_micro_f1 = val_ts_scores

            print(
                "val performance: ote: precision: %.4f, recall: %.4f, f1: %.4f, ts: precision: %.4f, recall: %.4f, micro-f1: %.4f"
                % (val_ote_p, val_ote_r, val_ote_f1, val_ts_micro_p,
                   val_ts_micro_r, val_ts_micro_f1))

            if args.selection_schema == 'OTE_TS':
                if val_ts_micro_f1 > best_val_ts_score and val_ote_f1 > best_val_ote_score:
                    best_val_ts_score = val_ts_micro_f1
                    best_val_ote_score = val_ote_f1
                    best_epoch = epoch + 1
                    print("Save...")
                    model.save_model(sess)
            elif args.selection_schema == 'TS':
                if val_ts_micro_f1 > best_val_ts_score:
                    best_val_ts_score = val_ts_micro_f1
                    best_val_ote_score = val_ote_f1
                    best_epoch = epoch + 1
                    print("Save...")
                    model.save_model(sess)

    print('Best model stored at epoch %d\n' % best_epoch)
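
Example #2 builds its feed dictionary through `get_train_feed_dict`, which is not shown on this page. A minimal sketch follows, assuming the model exposes one placeholder per argument; every placeholder attribute name below is hypothetical, only the argument list is taken from the call above.

def get_train_feed_dict(model, x, win_x, length, y_ote, y_ts, y_opn, y_stm,
                        lr, dropout_rate, train_flag=True):
    # placeholder names (model.x, model.win_x, ...) are hypothetical
    return {
        model.x: x,                        # word ids, source+target stacked
        model.win_x: win_x,                # windowed context features
        model.seq_length: length,          # true sequence lengths
        model.y_ote: y_ote,                # opinion-target labels
        model.y_ts: y_ts,                  # targeted-sentiment labels
        model.y_opn: y_opn,                # opinion-word labels
        model.y_stm: y_stm,                # sentiment-boundary labels
        model.lr: lr,                      # current learning rate
        model.dropout_rate: dropout_rate,
        model.train_flag: train_flag,      # disables dropout at eval time
    }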
Example #3

                    losses.add([loss, loss_rec, loss_adv, loss_d0, loss_d1])

                    # grad_rec, grad_adv, grad = sess.run([model.grad_rec_norm,
                    #    model.grad_adv_norm, model.grad_norm],
                    #    feed_dict=feed_dict)
                    # gradients.add([grad_rec, grad_adv, grad])
                    step += 1
                    if args.show_progress:
                        bar.next()
                    # This line would break into the progress bar's output,
                    # so it should only run when the bar is not shown,
                    # or be preceded by a '\n'
                    if step % args.steps_per_checkpoint == 0:
                        # the conditional newline (inline if/else) reads
                        # more clearly wrapped in parentheses
                        losses.output(('\n' if args.show_progress else '') +
                                      'step %d, time %.0fs,' %
                                      (step, time.time() - start_time))
                        losses.clear()

                        # gradients.output()
                        # gradients.clear()

                # args.dev defaults to the empty string, which is falsy,
                # so by default the else branch is taken
                if args.dev:
                    dev_losses = transfer(model, decoder, sess, args, vocab,
                                          dev0, dev1,
                                          args.output + '.epoch%d' % epoch)
                    dev_losses.output(('\n' if args.show_progress else '') +
                                      'dev result: ')
                    if dev_losses.values[0] < best_dev:
                        best_dev = dev_losses.values[0]
Example #4

                    loss_d0, _ = sess.run([model.loss_d0, model.optimize_d0],
                        feed_dict=feed_dict)
                    loss_d1, _ = sess.run([model.loss_d1, model.optimize_d1],
                        feed_dict=feed_dict)

                    # do not back-propagate from the discriminators
                    # when they are too weak
                    if loss_d0 < 1.2 and loss_d1 < 1.2:
                        optimize = model.optimize_tot
                    else:
                        optimize = model.optimize_rec

                    loss, loss_rec, loss_adv, _ = sess.run([model.loss,
                        model.loss_rec, model.loss_adv, optimize],
                        feed_dict=feed_dict)
                    losses.add([loss, loss_rec, loss_adv, loss_d0, loss_d1])

                    #grad_rec, grad_adv, grad = sess.run([model.grad_rec_norm,
                    #    model.grad_adv_norm, model.grad_norm],
                    #    feed_dict=feed_dict)
                    #gradients.add([grad_rec, grad_adv, grad])

                    step += 1
                    if step % args.steps_per_checkpoint == 0:
                        losses.output('step %d, time %.0fs,'
                            % (step, time.time() - start_time))
                        losses.clear()

                        #gradients.output()
                        #gradients.clear()

                if args.dev:
                    dev_losses = transfer(model, decoder, sess, args, vocab,
                        dev0, dev1, args.output + '.epoch%d' % epoch)
                    dev_losses.output('dev')
                    if dev_losses.values[0] < best_dev:
                        best_dev = dev_losses.values[0]
                        print('saving model...')
                        model.saver.save(sess, args.model)

                gamma = max(args.gamma_min, gamma * args.gamma_decay)
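
The final line of Example #4 anneals `gamma` (a softmax temperature) once per epoch: multiply by a decay factor, but never fall below a floor. A toy run with made-up values (the real ones come from command-line arguments such as the `args.gamma_decay` and `args.gamma_min` used above) illustrates the schedule:

gamma, gamma_decay, gamma_min = 1.0, 0.5, 0.001   # hypothetical values

for epoch in range(5):
    # ... one training epoch at temperature `gamma` ...
    gamma = max(gamma_min, gamma * gamma_decay)
    print('epoch %d done, gamma -> %.5f' % (epoch, gamma))

# prints 0.50000, 0.25000, 0.12500, 0.06250, 0.03125;
# gamma_min keeps the temperature from ever decaying to zero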