Example 1
def main(_):
    # process command line arguments and configuration
    if FLAGS.config is None:
        raise ValueError(
            "Please specify a configuration file (with --config).")
    else:
        config = configuration.get_config(FLAGS.config)

    TRAIN = FLAGS.train
    VALID = FLAGS.valid
    TEST = FLAGS.test

    if TRAIN and not VALID:
        raise ValueError(
            "Training and validation are always combined. "
            "Set both --train and --valid.")

    device = FLAGS.device
    if device == 'cpu':
        os.environ['CUDA_VISIBLE_DEVICES'] = ""  # if you don't want to use GPU

    eval_config = config.copy()  # same parameters for evaluation, except for:
    eval_config['batch_size'] = 1  # batch_size

    # num_steps for the test model is 1, except for a bidirectional model
    # used for prediction: still feed the full sentence length, because
    # otherwise the backward model gets no extra input
    if 'bidirectional' not in config or 'predict_next' not in config:
        eval_config['num_steps'] = 1

    # make sure save_path and log_dir exist
    try:
        os.makedirs(config['save_path'])
    except OSError:
        pass

    try:
        os.makedirs(config['log_dir'])
    except OSError:
        pass

    # if no name for the log file is specified, use the same name as save_path
    if 'log' in config:
        log_file = os.path.join(
            config['log_dir'], '{0}.log'.format(
                os.path.basename(os.path.normpath(config['log']))))
    else:
        log_file = os.path.join(
            config['log_dir'], '{0}.log'.format(
                os.path.basename(os.path.normpath(config['save_path']))))

    # if the log file already exists, append a random number to the name
    # to avoid overwriting an earlier run
    if os.path.isfile(log_file):
        rand_num = round(random.random(), 3)
        # slice off the '.log' suffix; rstrip('.log') strips characters,
        # not the suffix, and would mangle names ending in l, o or g
        log_file = log_file[:-len('.log')] + str(rand_num) + '.log'

    fout = open(log_file, 'w', buffering=1)  # line-buffered log file
    # write both to standard output and log file
    sys.stdout = writer(sys.stdout, fout)

    print('configuration:')
    for par, value in config.items():
        print('{0}\t{1}'.format(par, value))

    # read data in appropriate format + adapt configs if necessary
    config, eval_config, data, train_data, valid_data, test_data, (
        TRAIN, VALID, TEST) = read_data(config, eval_config,
                                        (TRAIN, VALID, TEST))

    with tf.Graph().as_default():

        if 'random' not in config:
            # use the same graph-level seed for random initialization
            # (to better compare models)
            tf.set_random_seed(1)
        # note: the op-level seed below makes initialization deterministic
        # even when 'random' is set in the config
        initializer = tf.random_uniform_initializer(
            minval=-config['init_scale'], maxval=config['init_scale'], seed=1)

        if TRAIN:

            reuseOrNot = True  # valid and test models reuse the training variables

            print('Create training model...')
            with tf.name_scope("Train"):
                with tf.variable_scope("Model",
                                       reuse=None,
                                       initializer=initializer):

                    train_lm = create_lm(config, is_training=True, reuse=False)
                    merged = tf.summary.merge_all()
                    train_writer = tf.summary.FileWriter(
                        os.path.join(config['save_path'], 'train'))

                saver = tf.train.Saver()

        else:
            reuseOrNot = None

        if VALID:
            print('Create validation model...')
            with tf.name_scope("Valid"):
                with tf.variable_scope("Model",
                                       reuse=reuseOrNot,
                                       initializer=initializer):
                    valid_lm = create_lm(config,
                                         is_training=False,
                                         reuse=reuseOrNot)

            if reuseOrNot is None:
                reuseOrNot = True

        if TEST:

            print('Create testing model...')
            with tf.name_scope("Test"):
                with tf.variable_scope("Model",
                                       reuse=reuseOrNot,
                                       initializer=initializer):
                    test_lm = create_lm(eval_config,
                                        is_training=False,
                                        reuse=reuseOrNot,
                                        test=True)

        # the Supervisor handles variable initialization and checkpointing
        # in save_path
        sv = tf.train.Supervisor(logdir=config['save_path'])

        # allow_soft_placement: fall back to a supported device when the
        # requested one is unavailable
        with sv.managed_session(config=tf.ConfigProto(
                allow_soft_placement=True)) as session:

            if TRAIN and VALID:

                train_writer.add_graph(session.graph)

                # create a trainer object based on the class named in the config
                trainer_class = getattr(trainer, config['trainer'])
                train_obj = trainer_class(session, saver, config, train_lm,
                                          valid_lm, data, train_data,
                                          valid_data)

                # train + validate the model
                train_obj.train()

                train_writer.close()

            if VALID and not TRAIN:

                validator = run_epoch.run_epoch(session,
                                                valid_lm,
                                                data,
                                                valid_data,
                                                eval_op=None,
                                                test=False)

                valid_perplexity = validator()
                print('Valid Perplexity: {0}'.format(valid_perplexity))

            if TEST:

                # test the model
                print('Start testing...')

                if 'rescore' in config or 'debug2' in config or 'predict_next' in config:

                    # read sentence per sentence from file
                    if 'per_sentence' in config and 'stream_data' in config:
                        tester = run_epoch.rescore(session,
                                                   test_lm,
                                                   data,
                                                   test_data,
                                                   eval_op=None,
                                                   test=True)

                        data_file = data.init_batching(test_data)

                        end_reached = False

                        while True:

                            if 'bidirectional' in config:
                                length_batch = test_lm.num_steps + 1
                                x, _, end_reached, seq_lengths = data.get_batch(
                                    data_file,
                                    test=True,
                                    num_steps=length_batch)
                            else:
                                x, _, end_reached, _ = data.get_batch(
                                    data_file, test=True)

                            if end_reached:
                                print('Done')
                                sys.exit(0)
                            else:
                                tester(x[0])

                    # normal sentence-level rescoring
                    # test_data already contains all data
                    else:

                        tester = run_epoch.rescore(session,
                                                   test_lm,
                                                   data,
                                                   test_data,
                                                   eval_op=None,
                                                   test=True)

                        counter = 0
                        for line in test_data:
                            tester(line)
                            counter += 1
                            if counter % 100 == 0:
                                print(
                                    '{0} sentences processed'.format(counter))

                        print('Done')
                        sys.exit(0)

                else:
                    tester = run_epoch.run_epoch(session,
                                                 test_lm,
                                                 data,
                                                 test_data,
                                                 eval_op=None,
                                                 test=True)

                    test_perplexity = tester()

                    print('Test Perplexity: {0}'.format(test_perplexity))
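
Both examples reassign sys.stdout to a writer object so that everything
printed goes to the terminal and the log file at once. writer is defined
elsewhere in the project; below is a minimal tee-style sketch of the
behaviour the call site assumes. The class name and constructor arguments
are taken from the code above, everything else is an assumption:

class writer(object):
    """Duplicate writes across several streams (minimal sketch)."""

    def __init__(self, *streams):
        self.streams = streams

    def write(self, text):
        # forward the text to every underlying stream
        for stream in self.streams:
            stream.write(text)

    def flush(self):
        for stream in self.streams:
            stream.flush()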
Example 2
def main(_):
    # process command line arguments and configuration
    if FLAGS.config is None:
        raise ValueError(
            "Please specify a configuration file (with --config).")
    else:
        config = configuration.get_config(FLAGS.config)

    TRAIN = FLAGS.train
    VALID = FLAGS.valid
    TEST = FLAGS.test

    if TRAIN and not VALID:
        raise ValueError(
            "Training and validation are always combined. "
            "Set both --train and --valid.")

    device = FLAGS.device
    if device == 'cpu':
        os.environ['CUDA_VISIBLE_DEVICES'] = ""  # if you don't want to use GPU

    eval_config = config.copy()  # same parameters for evaluation, except for:
    eval_config['batch_size'] = 1  # batch_size
    eval_config['num_steps'] = 1  # and number of steps

    try:
        os.makedirs(config['save_path'])
    except OSError:
        pass

    # use os.path.join so a missing trailing separator in save_path
    # does not break the path
    final_model = os.path.join(
        config['save_path'],
        os.path.basename(os.path.normpath(config['save_path'])) + '.final')

    if os.path.isfile(final_model) and TRAIN:
        raise OSError(
            "{0} already exists. If you want to re-train the model, remove the model file and its checkpoints."
            .format(final_model))

    # if no name for the log file is specified, use the same name as save_path
    if 'log' in config:
        log_file = os.path.join(
            LOG_DIR,
            os.path.basename(os.path.normpath(config['log'])) + '.log')
    else:
        log_file = os.path.join(
            LOG_DIR,
            os.path.basename(os.path.normpath(config['save_path'])) + '.log')
    # if the log file already exists, append a random number to the name
    # to avoid overwriting an earlier run
    if os.path.isfile(log_file):
        rand_num = round(random.random(), 3)
        # slice off the '.log' suffix; strip('.log') strips characters from
        # both ends, not the suffix
        log_file = log_file[:-len('.log')] + str(rand_num) + '.log'

    fout = open(log_file, 'w', buffering=1)  # line-buffered log file
    # write both to standard output and log file
    sys.stdout = writer(sys.stdout, fout)

    print('configuration:')
    for par, value in config.items():
        print('{0}\t{1}'.format(par, value))

    # read data in appropriate format + adapt configs if necessary
    config, eval_config, data, train_data, valid_data, test_data, (
        TRAIN, VALID, TEST) = read_data(config, eval_config,
                                        (TRAIN, VALID, TEST))

    with tf.Graph().as_default():

        initializer = tf.random_uniform_initializer(
            minval=-config['init_scale'], maxval=config['init_scale'])

        if TRAIN:

            reuseOrNot = True  # valid and test models reuse the training variables

            print('Create training model...')
            with tf.name_scope("Train"):
                with tf.variable_scope("Model",
                                       reuse=None,
                                       initializer=initializer):
                    train_lm = lm.lm(config, is_training=True, reuse=False)

        else:
            reuseOrNot = None

        if VALID:
            print('Create validation model...')
            with tf.name_scope("Valid"):
                with tf.variable_scope("Model",
                                       reuse=reuseOrNot,
                                       initializer=initializer):
                    valid_lm = lm.lm(config,
                                     is_training=False,
                                     reuse=reuseOrNot)

            if reuseOrNot is None:
                reuseOrNot = True

        if TEST:

            print('Create testing model...')
            with tf.name_scope("Test"):
                with tf.variable_scope("Model",
                                       reuse=reuseOrNot,
                                       initializer=initializer):
                    test_lm = lm.lm(eval_config,
                                    is_training=False,
                                    reuse=reuseOrNot)

        # the Supervisor handles variable initialization and checkpointing
        # in save_path
        sv = tf.train.Supervisor(logdir=config['save_path'])

        # allow_soft_placement: fall back to a supported device when the
        # requested one is unavailable
        with sv.managed_session(config=tf.ConfigProto(
                allow_soft_placement=True)) as session:

            if TRAIN and VALID:

                # create a trainer object based on the class named in the config
                trainer_class = getattr(trainer, config['trainer'])
                train_obj = trainer_class(sv, session, config, train_lm,
                                          valid_lm, data, train_data,
                                          valid_data)

                # train + validate the model
                train_obj.train()

            if VALID and not TRAIN:

                validator = run_epoch.run_epoch(session,
                                                valid_lm,
                                                data,
                                                valid_data,
                                                eval_op=None,
                                                test=False,
                                                valid=True)

                valid_perplexity = validator()
                print('Valid Perplexity: {0}'.format(valid_perplexity))

            if TEST:

                # n-best rescoring
                if 'rescore' in config:
                    print('Start rescoring.')

                    tester = run_epoch.rescore(session,
                                               test_lm,
                                               data,
                                               test_data,
                                               eval_op=None,
                                               test=True)

                    counter = 0
                    for line in test_data:
                        tester(line)
                        counter += 1
                        if counter % 100 == 0:
                            print('{0} sentences processed'.format(counter))

                    print('Done rescoring.')

                # test the model
                else:

                    tester = run_epoch.run_epoch(session,
                                                 test_lm,
                                                 data,
                                                 test_data,
                                                 eval_op=None,
                                                 test=True)

                    test_perplexity = tester()

                    print('Test Perplexity: {0}'.format(test_perplexity))
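
Both examples rely on a module-level preamble that is not shown: imports,
a FLAGS object, and (in Example 2) a LOG_DIR constant. Here is a minimal
sketch of what that preamble might look like with the TF 1.x flags API.
The flag names follow the FLAGS attributes used above; the defaults, help
strings, and the LOG_DIR value are assumptions:

import os
import random
import sys

import tensorflow as tf

import configuration  # project modules assumed by the examples
import run_epoch
import trainer
import lm  # Example 2; Example 1 uses a create_lm helper instead

flags = tf.app.flags
flags.DEFINE_string('config', None, 'path to the configuration file')
flags.DEFINE_boolean('train', True, 'train the model')
flags.DEFINE_boolean('valid', True, 'validate the model')
flags.DEFINE_boolean('test', False, 'test the model')
flags.DEFINE_string('device', '', "set to 'cpu' to hide the GPUs")
FLAGS = flags.FLAGS

LOG_DIR = 'logs'  # assumed log directory used in Example 2

if __name__ == '__main__':
    tf.app.run()  # parses the flags and calls main()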