Example #1
    if not os.path.exists(test_file):
        create_data4add(test_file, n_test, digit_max=99)

    for training_mode in [True, False]:  # training & testing
        for batch_size in [1, 10, 100]:
            tf.reset_default_graph()  # Clears the default graph stack and resets the global default graph.
            log.info('')
            log.info('training_mode: %s, batch_size: %s, total_train_time: %s secs' % (training_mode, batch_size, total_train_time))

            model_name = os.path.basename(__file__).replace('.py', '')
            model_file = os.path.join(SAMPLE_MODELS_DIR, '%s.n_train_%s.batch_size_%s.total_train_time_%s/model' % (model_name, n_train, batch_size, total_train_time))
            model_dir = os.path.dirname(model_file)
            log.info('model_name: %s' % model_name)
            log.info('model_file: %s' % model_file)

            scope_name = '%s.%s.batch_size_%s.total_train_time_%s' % (model_name, DateUtil.current_yyyymmdd_hhmm(), batch_size, total_train_time)
            log.info('scope_name: %s' % scope_name)

            with tf.device('/gpu:0'):
                with tf.Graph().as_default():  # fresh default graph for each run, so the same variable names can be reused
                    checkpoint = tf.train.get_checkpoint_state(model_dir)
                    is_training = training_mode or not checkpoint  # train if requested, or if no saved checkpoint exists yet

                    config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
                    with tf.Session(config=config) as sess:
                        train_pipeline = input_pipeline([train_file], batch_size=batch_size, shuffle=True, delim='\t', splits=3)
                        valid_pipeline = input_pipeline([valid_file], batch_size=n_valid, shuffle=True, delim='\t', splits=3)
                        test_pipeline = input_pipeline([test_file], batch_size=n_test, shuffle=True, delim='\t', splits=3)

                        if is_training:  # training
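                            # create_graph builds the model: input/target tensors x and y, a learning_rate tensor,
                            # weights W1 and b1, the prediction y_hat, the cost, a train_step op, and a summary op.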
                            x, y, learning_rate, W1, b1, y_hat, cost, train_step, summary = create_graph(model_name, scope_name, verbose=False)
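Both examples feed training, validation, and test data through an input_pipeline helper that is defined elsewhere in the project. Below is a minimal sketch of what such a helper could look like, assuming a TF 1.x queue-based reader that batches tab-delimited text lines and splits them into `splits` numeric columns; the actual implementation in the original code may differ.

# Hypothetical sketch of the input_pipeline helper used above (TF 1.x queue input).
import tensorflow as tf

def input_pipeline(filenames, batch_size, shuffle=True, delim='\t', splits=3):
    # Queue of input files; shuffling here also reshuffles file order each epoch.
    filename_queue = tf.train.string_input_producer(filenames, shuffle=shuffle)
    reader = tf.TextLineReader()
    _, line = reader.read(filename_queue)

    if shuffle:
        lines = tf.train.shuffle_batch([line], batch_size=batch_size,
                                       capacity=batch_size * 10,
                                       min_after_dequeue=batch_size * 2)
    else:
        lines = tf.train.batch([line], batch_size=batch_size,
                               capacity=batch_size * 10)

    # Split each line into `splits` numeric columns on the delimiter
    # (assumes the data files contain numbers, e.g. "a<TAB>b<TAB>a+b").
    columns = tf.decode_csv(lines, record_defaults=[[0.0]] * splits,
                            field_delim=delim)
    return columns

With a queue-based pipeline like this, the queue runners must be started inside the session (e.g. tf.train.start_queue_runners(sess)) before any of the pipeline tensors are evaluated.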
Example #2
            log.info('')
            log.info(
                'training_mode: %s, batch_size: %s, total_train_time: %s secs'
                % (training_mode, batch_size, total_train_time))

            model_name = os.path.basename(__file__).replace('.py', '')
            model_file = os.path.join(
                MODELS_DIR,
                '%s.n_train_%s.batch_size_%s.total_train_time_%s/model' %
                (model_name, n_train, batch_size, total_train_time))
            model_dir = os.path.dirname(model_file)
            log.info('model_name: %s' % model_name)
            log.info('model_file: %s' % model_file)

            scope_name = '%s.%s.batch_size_%s.total_train_time_%s' % (
                model_name, DateUtil.current_yyyymmdd_hhmm(), batch_size,
                total_train_time)
            log.info('scope_name: %s' % scope_name)

            with tf.device('/gpu:0'):
                with tf.Graph().as_default():  # fresh default graph for each run, so the same variable names can be reused
                    checkpoint = tf.train.get_checkpoint_state(model_dir)
                    is_training = training_mode or not checkpoint  # train if requested, or if no saved checkpoint exists yet

                    config = tf.ConfigProto(gpu_options=tf.GPUOptions(
                        allow_growth=True, visible_device_list='0'))
                    with tf.Session(config=config) as sess:
                        train_pipeline = input_pipeline([train_file],
                                                        batch_size=batch_size,
                                                        shuffle=True,
                                                        delim='\t',