import os
import logging

import tensorflow as tf

# Project-level imports (Model, process_args, exp_config) are assumed to be
# provided elsewhere in this repository; their module paths are not shown here.


def main(args, defaults):
    parameters = process_args(args, defaults)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(parameters.gpu_id)

    # Log DEBUG and above to the log file, INFO and above to the console.
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s',
        filename=parameters.log_path)
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    # Let TensorFlow grow GPU memory on demand instead of reserving it all.
    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          gpu_options=gpu_options)) as sess:
        model = Model(
            phase=parameters.phase,
            gpu_id=parameters.gpu_id,
            channel=parameters.channel,
            mean=parameters.mean,
            visualize=parameters.visualize,
            use_gru=parameters.use_gru,
            load_model=parameters.load_model,
            data_dir=parameters.data_dir,
            label_path=parameters.label_path,
            lexicon_file=parameters.lexicon_file,
            model_dir=parameters.model_dir,
            output_dir=parameters.output_dir,
            steps_per_checkpoint=parameters.steps_per_checkpoint,
            num_epoch=parameters.num_epoch,
            batch_size=parameters.batch_size,
            initial_learning_rate=parameters.initial_learning_rate,
            clip_gradients=parameters.clip_gradients,
            max_gradient_norm=parameters.max_gradient_norm,
            target_embedding_size=parameters.target_embedding_size,
            attn_num_hidden=parameters.attn_num_hidden,
            attn_num_layers=parameters.attn_num_layers,
            valid_target_length=float('inf'),
            session=sess)
        print('Model initialized, launching...')
        model.launch()
def test():
    # Hard-coded training configuration for a quick local run.
    defaults = exp_config.ExpConfig
    parameters = dict()
    parameters['log_path'] = 'log.txt'
    parameters['phase'] = 'train'
    parameters['visualize'] = defaults.VISUALIZE
    parameters['data_path'] = 'train.txt'
    parameters['data_root_dir'] = '../data/date'
    parameters['lexicon_file'] = 'lexicon.txt'
    parameters['output_dir'] = defaults.OUTPUT_DIR
    parameters['batch_size'] = 4
    parameters['initial_learning_rate'] = 1.0
    parameters['num_epoch'] = 30
    parameters['steps_per_checkpoint'] = 200
    parameters['target_vocab_size'] = defaults.TARGET_VOCAB_SIZE
    parameters['model_dir'] = '../output'
    parameters['target_embedding_size'] = 10
    parameters['attn_num_hidden'] = defaults.ATTN_NUM_HIDDEN
    parameters['attn_num_layers'] = defaults.ATTN_NUM_LAYERS
    parameters['clip_gradients'] = defaults.CLIP_GRADIENTS
    parameters['max_gradient_norm'] = defaults.MAX_GRADIENT_NORM
    parameters['load_model'] = defaults.LOAD_MODEL
    parameters['gpu_id'] = defaults.GPU_ID
    parameters['use_gru'] = False

    # Log DEBUG and above to the log file, INFO and above to the console.
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s',
        filename=parameters['log_path'])
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    # Let TensorFlow grow GPU memory on demand instead of reserving it all.
    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          gpu_options=gpu_options)) as sess:
        model = Model(
            phase=parameters['phase'],
            visualize=parameters['visualize'],
            data_path=parameters['data_path'],
            data_root_dir=parameters['data_root_dir'],
            output_dir=parameters['output_dir'],
            batch_size=parameters['batch_size'],
            initial_learning_rate=parameters['initial_learning_rate'],
            num_epoch=parameters['num_epoch'],
            steps_per_checkpoint=parameters['steps_per_checkpoint'],
            target_vocab_size=parameters['target_vocab_size'],
            model_dir=parameters['model_dir'],
            target_embedding_size=parameters['target_embedding_size'],
            attn_num_hidden=parameters['attn_num_hidden'],
            attn_num_layers=parameters['attn_num_layers'],
            clip_gradients=parameters['clip_gradients'],
            max_gradient_norm=parameters['max_gradient_norm'],
            load_model=parameters['load_model'],
            valid_target_length=float('inf'),
            gpu_id=parameters['gpu_id'],
            use_gru=parameters['use_gru'],
            session=sess)
        model.launch()
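
# A minimal command-line entry point, added as a sketch rather than a definitive
# part of this file: it simply forwards sys.argv to main() and uses
# exp_config.ExpConfig as the defaults object, mirroring how test() obtains its
# defaults. Adjust or remove it if the repository already wires main() elsewhere.
if __name__ == '__main__':
    import sys
    main(sys.argv[1:], exp_config.ExpConfig)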