from util.decode import decode, decode_to_output, exclude_list, get_exclude_list, truncate_sents
from model.postprocess import PostProcess
from nltk.translate.bleu_score import sentence_bleu
from util.mteval_bleu import MtEval_BLEU
import tensorflow as tf
from os.path import exists
from os import remove, listdir, makedirs
import math
import numpy as np
import time
from util.arguments import get_args
from util import constant  # needed for constant.SYMBOL_PAD below
from copy import deepcopy
from model.model_config import get_path

args = get_args()


def get_graph_val_data(objs, model_config, it, data):
    input_feed = {}
    # The reserved sections of the vocabularies are the same,
    # so the simple-side vocabulary can supply the PAD id.
    voc = data.vocab_simple
    if model_config.subword_vocab_size > 0:
        pad_id = voc.encode(constant.SYMBOL_PAD)
    else:
        pad_id = [voc.encode(constant.SYMBOL_PAD)]

    output_tmp_sentence_simple, output_tmp_sentence_complex, \
        output_tmp_sentence_complex_raw, output_tmp_sentence_complex_raw_lines, \
        output_tmp_mapper, output_tmp_ref_raw_lines = [], [], [], [], [], []
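
# A minimal sketch of how a pad id like the one built above is typically
# applied, assuming a hypothetical pad_sentence helper (not part of this
# file). pad_id is always a list at this point: a subword vocabulary's
# encode() returns a list of ids, while a word-level vocabulary returns a
# single id that gets wrapped in a list, so the same extend-based padding
# covers both cases.
def pad_sentence(word_ids, max_len, pad_id):
    """Truncate or pad a list of token ids to exactly max_len."""
    word_ids = word_ids[:max_len]     # drop tokens beyond max_len
    while len(word_ids) < max_len:
        word_ids.extend(pad_id)       # append PAD id(s) until long enough
    return word_ids[:max_len]         # re-trim in case len(pad_id) > 1

# Example: pad_sentence([5, 9, 23], 6, [0]) -> [5, 9, 23, 0, 0, 0]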
    --output_directory 'processed/bam' \
    --walltime '06:00:00' \
    --nodes 2 \
    --cores 12 \
    --humanonly

The specific pipeline using this script is given in `wes_pipeline.sh`.
"""
import os
import yaml

import util.arguments as arguments

# Load command arguments
args = arguments.get_args()
command = args.which
genome = args.genome
input_dir = args.input_directory
output_dir = args.output_directory
config = args.config_yaml
walltime = args.walltime
nodes = str(args.nodes)
cores = str(args.cores)

# Load configuration (safe_load avoids executing arbitrary YAML tags)
with open(config, 'r') as stream:
    config = yaml.safe_load(stream)

# Load constants
python = config['python']
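
# Because config is rebound from the YAML path to the parsed dictionary,
# every later lookup such as config['python'] reads from that dictionary.
# A minimal, self-contained sketch of the shape this script expects; only
# the 'python' key is confirmed above, the rest is a hypothetical example.
_example_yaml = """
python: /usr/bin/python3
reference_dir: /data/reference
"""
_example_config = yaml.safe_load(_example_yaml)
assert _example_config['python'] == '/usr/bin/python3'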