Code example #1
File: q1_window.py Project: ziyaochen/CS224n
def do_train(args):
    # Set up some parameters.
    config = Config()
    helper, train, dev, train_raw, dev_raw = load_and_preprocess_data(args)
    embeddings = load_embeddings(args, helper)
    config.embed_size = embeddings.shape[1]
    helper.save(config.output_path)

    handler = logging.FileHandler(config.log_output)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
    logging.getLogger().addHandler(handler)

    report = None  # Report(Config.eval_output)

    with tf.Graph().as_default():
        logger.info("Building model...", )
        start = time.time()
        model = WindowModel(helper, config, embeddings)
        logger.info("took %.2f seconds", time.time() - start)

        init = tf.global_variables_initializer()
        saver = tf.train.Saver()

        with tf.Session() as session:
            session.run(init)
            model.fit(session, saver, train, dev)
            if report:
                report.log_output(model.output(session, dev_raw))
                report.save()
            else:
                # Save predictions in a text file.
                output = model.output(session, dev_raw)
                sentences, labels, predictions = zip(*output)
                predictions = [[LBLS[l] for l in preds] for preds in predictions]
                output = list(zip(sentences, labels, predictions))  # materialize: iterated twice below

                with open(model.config.conll_output, 'w') as f:
                    write_conll(f, output)
                with open(model.config.eval_output, 'w') as f:
                    for sentence, labels, predictions in output:
                        print_sentence(f, sentence, labels, predictions)
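
A note on the prediction-saving branch: in Python 3, zip() returns a one-shot iterator, so writing output with write_conll would exhaust it and the eval-output loop below it would see nothing. The version above therefore materializes it with list(). A minimal demonstration of the pitfall:

# zip() is lazy in Python 3: a second pass over the same iterator is empty.
pairs = zip(["I", "live", "here"], ["O", "O", "LOC"])
print(list(pairs))  # [('I', 'O'), ('live', 'O'), ('here', 'LOC')]
print(list(pairs))  # [] -- already exhausted

# Materializing once with list() makes both file-writing passes safe:
pairs = list(zip(["I", "live", "here"], ["O", "O", "LOC"]))
print(list(pairs))  # full data, every time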
Code example #2
File: q1_window.py Project: ziyaochen/CS224n
def do_test2(args):
    logger.info("Testing implementation of WindowModel")
    config = Config()
    helper, train, dev, train_raw, dev_raw = load_and_preprocess_data(args)
    embeddings = load_embeddings(args, helper)
    config.embed_size = embeddings.shape[1]

    with tf.Graph().as_default():
        logger.info("Building model...", )
        start = time.time()
        model = WindowModel(helper, config, embeddings)
        logger.info("took %.2f seconds", time.time() - start)

        init = tf.global_variables_initializer()
        saver = None

        with tf.Session() as session:
            session.run(init)
            model.fit(session, saver, train, dev)

    logger.info("Model did not crash!")
    logger.info("Passed!")
Code example #3
def do_test2(args):
    logger.info("Testing implementation of RNNModel")
    config = Config(args)
    helper, train, dev, train_raw, dev_raw = load_and_preprocess_data(args)
    embeddings = load_embeddings(args, helper)
    config.embed_size = embeddings.shape[1]

    with tf.Graph().as_default():
        logger.info("Building model...", )
        start = time.time()
        model = RNNModel(helper, config, embeddings)
        logger.info("took %.2f seconds", time.time() - start)

        init = tf.global_variables_initializer()
        saver = None

        with tf.Session() as session:
            session.run(init)
            model.fit(session, saver, train, dev)

    logger.info("Model did not crash!")
    logger.info("Passed!")
Code example #4
def do_train(args):
    # Set up some parameters.
    config = Config()
    helper, train, dev, train_raw, dev_raw = load_and_preprocess_data(args)
    embeddings = load_embeddings(args, helper)
    config.embed_size = embeddings.shape[1]
    helper.save(config.output_path)

    handler = logging.FileHandler(config.log_output)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(
        logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
    logging.getLogger().addHandler(handler)

    report = None  # Report(Config.eval_output)

    logger.info("Building model...", )
    start = time.time()
    model = WindowModel(helper, config, embeddings)
    logger.info("took %.2f seconds", time.time() - start)
    model.apply(init_weights)

    model.fit(train, dev)
    if report:
        report.log_output(model.output(dev_raw))
        report.save()
    else:
        # Save predictions in a text file.
        output = model.output(dev_raw)
        sentences, labels, predictions = zip(*output)
        predictions = [[LBLS[l] for l in preds] for preds in predictions]
        output = list(zip(sentences, labels, predictions))  # materialize: iterated twice below

        with open(model.config.conll_output, 'w') as f:
            write_conll(f, output)
        with open(model.config.eval_output, 'w') as f:
            for sentence, labels, predictions in output:
                print_sentence(f, sentence, labels, predictions)
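
This variant is a PyTorch port of example #1: there is no graph or session, and weights are initialized by passing a function to Module.apply, which calls it on every submodule recursively. The snippet does not show init_weights; a plausible sketch (the Xavier choice is an assumption, not the project's code):

import torch.nn as nn

def init_weights(module):
    # Hypothetical initializer: Xavier-init Linear weights, zero the biases.
    if isinstance(module, nn.Linear):
        nn.init.xavier_uniform_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)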
Code example #5

    print 'What should the output file name be?'
    outputFileName = raw_input()

    ######################################
    ##           get the data           ##
    ######################################

    # load in the data
    debug = False
    if len(sys.argv) > 2 and sys.argv[2] == "debug":
        debug = True
    helper, train_final_data, dev_final_data, test_final_data, train, dev, test = load_and_preprocess_data(
        debug)
    pretrained_embeddings = load_embeddings(
        helper,
        vocabPath="../Vectors/gloveVocab.txt",
        vectorPath="../Vectors/glove.6B.200d.txt",
        wordFirst=True,
        embed_size=200)

    Config.embed_size = pretrained_embeddings.shape[1]

    # for later
    neverOpened_gold = True
    neverOpened_test = True

    ######################################
    ##           define graph           ##
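
This example (and example #7 below) is Python 2: print is a statement and raw_input reads a line from stdin. For reference, the Python 3 equivalents of the prompts would be:

print('What should the output file name be?')
outputFileName = input()  # raw_input() was renamed to input() in Python 3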
Code example #6
    def __init__(self, config, pretrained_embeddings):
        self.pretrained_embeddings = pretrained_embeddings
        self.config = config
        self.encoder_inputs = None
        self.decoder_inputs = None
        self.decoder_targets = None
        self.grad_norm = None
        self.build()


if __name__ == '__main__':

    # Get data and embeddings
    start = time.time()
    print("Loading data...")
    train, dev, test, _, _, _, max_x, max_y, E, voc = load_and_preprocess_data(
        output='tokens_debug.txt', debug=True)
    print("Took {} seconds to load data".format(time.time() - start))

    # Set up some parameters.
    print(80 * "=")
    print("INITIALIZING")
    print(80 * "=")
    config = Config()
    config.voc_size = len(voc)
    config.embedding_size = E.shape[1]
    config.max_length_x = 250
    config.max_length_y = 11
    config.voc = voc

    UNK_IDX = voc[UNK_TOKEN]
    START_IDX = voc[START_TOKEN]
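
UNK_IDX and START_IDX are the vocabulary ids of the unknown-word and start-of-sequence tokens. A typical way they are used when encoding input, sketched under the assumption that voc maps token strings to integer ids (the encode helper is hypothetical):

def encode(tokens, voc, unk_idx, start_idx):
    # Prepend the start token; out-of-vocabulary words fall back to UNK.
    return [start_idx] + [voc.get(token, unk_idx) for token in tokens]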
Code example #7
    print 'What should the confusion matrix file name be?'
    cmFileName = raw_input()

    print 'What should the output file name be?'
    outputFileName = raw_input()

    ######################################
    ##           get the data           ##
    ######################################

    # load in the data
    debug = False
    if len(sys.argv) > 2 and sys.argv[2] == "debug":
        debug = True
    helper, train_final_data, dev_final_data, test_final_data, train, dev, test, country_dict_key_int = load_and_preprocess_data(
        debug)
    pretrained_embeddings = load_embeddings(
        helper,
        vocabPath="../Vectors/gloveVocab.txt",
        vectorPath="../Vectors/glove.6B.200d.txt",
        wordFirst=True,
        embed_size=200)

    Config.embed_size = pretrained_embeddings.shape[1]

    # for later
    neverOpened_gold = True
    neverOpened_test = True

    ######################################
    ##           define graph           ##
Code example #8
File: main.py Project: yjy0625/cs677-fall-2018
def main():
    # load data
    print("Loading datasets...")
    dataset, dataset_val, dataset_test = load_and_preprocess_data(
        'cifar-100-python/')
    print("Dataset loading completes.")

    # setup metadata
    metadata = get_metadata('cifar-100-python', 'meta')
    config.data_info = list(dataset['data'].shape[1:])
    config.fine_label_names = metadata['fine_label_names']
    config.coarse_label_names = metadata['coarse_label_names']
    config.label_mapping = get_label_mapping(metadata)

    # create log directory
    hyper_parameter_str = 'bs_{}_lr_{}'.format(
        config.batch_size,
        config.learning_rate,
    )

    if config.apply_batch_norm: hyper_parameter_str += '_batchnorm'
    if config.add_more_layers: hyper_parameter_str += '_morelayers'
    if config.larger_filter_size: hyper_parameter_str += '_largefilter'
    if config.add_dropout: hyper_parameter_str += '_dropout'

    train_dir = './train_dir/{}_{}_{}_{}'.format(
        'cifar10', config.prefix, hyper_parameter_str,
        time.strftime("%Y%m%d-%H%M%S"))

    if not os.path.exists(train_dir): os.makedirs(train_dir)
    print("Train Dir: {}".format(train_dir))

    # reset default graph
    tf.reset_default_graph()

    # create model
    model = Model(config)

    # training setups
    saver = tf.train.Saver(max_to_keep=100)
    summary_writer = tf.summary.FileWriter(train_dir)
    session_config = tf.ConfigProto(
        gpu_options=tf.GPUOptions(allow_growth=True), device_count={'GPU': 1})

    with tf.Session(config=session_config) as session:
        session.run(tf.global_variables_initializer())

        # buffers for train and val losses
        train_losses = []
        val_losses = []

        # train model
        for step in range(config.max_steps):
            # run validation step
            if step % config.val_step == 0:
                val_batch = sample_batch(dataset_val, config.batch_size)
                val_stats = run_single_step(session,
                                            model,
                                            val_batch,
                                            summary_writer,
                                            config.label_mapping,
                                            mode='val')
                val_losses.append([val_stats['step'], val_stats['loss']])

            # run train step
            train_batch = sample_batch(dataset, config.batch_size)
            train_stats = run_single_step(session,
                                          model,
                                          train_batch,
                                          summary_writer,
                                          config.label_mapping,
                                          mode='train',
                                          log=step % config.log_step == 0)
            train_losses.append([train_stats['step'], train_stats['loss']])

            # save checkpoint
            if step % config.save_checkpoint_step == 0:
                print("Saved checkpoint at step {}".format(step))
                saver.save(session,
                           os.path.join(train_dir, 'model'),
                           global_step=step)

        # test model
        test_logfile = os.path.join(train_dir, 'test_result.txt')
        exp_results = run_single_step(session,
                                      model,
                                      dataset_test,
                                      summary_writer,
                                      config.label_mapping,
                                      mode='test',
                                      test_logfile=test_logfile)

        # add loss curves to experiment results
        exp_results['train_losses'] = train_losses
        exp_results['val_losses'] = val_losses

        # log test results
        with open(os.path.join(train_dir, 'test_result.p'), 'wb') as f:
            pickle.dump(exp_results, f)
            print("Logged experiment results to {}".format(f.name))

        # flush Tensorboard summaries
        summary_writer.flush()
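
The snippet does not show sample_batch; a minimal sketch of what it plausibly does, assuming dataset is a dict of equally sized arrays keyed by 'data' and the label names (all of these names are assumptions):

import numpy as np

def sample_batch(dataset, batch_size):
    # Hypothetical helper: pick the same random rows from every array.
    idx = np.random.choice(len(dataset['data']), size=batch_size, replace=False)
    return {key: value[idx] for key, value in dataset.items()}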
Code example #9
        self.config = config
        self.encoder_inputs = None
        self.decoder_inputs = None
        self.decoder_targets = None
        self.length_encoder_inputs = None
        self.length_decoder_inputs = None
        self.grad_norm = None
        self.build()


if __name__ == '__main__':

    # Get data and embeddings
    start = time.time()
    print("Loading data...")
    train, dev, test, _, _, _, max_x, max_y, E, voc = load_and_preprocess_data()
    # Debug variant:
    # train, dev, test, _, _, _, max_x, max_y, E, voc = load_and_preprocess_data(output='tokens_debug.txt', debug=True)
    print("Took {} seconds to load data".format(time.time() - start))

    # Set up some parameters.
    print(80 * "=")
    print("INITIALIZING")
    print(80 * "=")
    config = Config()
    config.voc_size = len(voc)
    config.embedding_size = E.shape[1]
    config.max_length_x = 250
    config.max_length_y = 11
    config.voc = voc
    config.idx2word = {v: k for k, v in voc.items()}  # invert the vocab: id -> word
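
idx2word inverts the vocabulary so predicted index sequences can be mapped back to tokens; a small usage sketch (the decode helper is an assumption, not shown in the project):

def decode(indices, idx2word):
    # Map each predicted id back to its token string.
    return ' '.join(idx2word[i] for i in indices)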