def __init__(self, name, tasks, checkpoint_dir, keep_best=1, main_task=None, **kwargs):
    """
    Proxy for several translation models that are trained jointly.
    This class presents the same interface as TranslationModel.
    """
    super(MultiTaskModel, self).__init__(name, checkpoint_dir, keep_best, **kwargs)

    self.checkpoint_dir = checkpoint_dir
    self.models = []
    self.ratios = []

    for task in tasks:
        # merge both dictionaries (task parameters have higher precedence)
        kwargs_ = dict(**kwargs)
        kwargs_.update(task)

        model = TranslationModel(checkpoint_dir=None, keep_best=keep_best, **kwargs_)
        self.models.append(model)
        self.ratios.append(task.ratio if task.ratio is not None else 1)

    self.ratios = [ratio / sum(self.ratios) for ratio in self.ratios]  # unit normalization
    self.main_task = main_task
    self.global_step = 0  # steps of all tasks combined
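# Illustrative sketch only (not part of the original class): shows how the
# unit-normalized `ratios` computed above could drive proportional task
# sampling in a joint training loop; `multi_task_model` is a hypothetical
# instance name used for the example.
def _sample_tasks_demo(multi_task_model, steps=100):
    import random
    # pick one task's model per step, with probability equal to its normalized
    # ratio; with ratios [0.75, 0.25], about 3 of every 4 sampled steps would
    # train the first task
    return random.choices(multi_task_model.models,
                          weights=multi_task_model.ratios, k=steps)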
def __init__(self, tasks, **kwargs):
    self.models = []
    self.ratios = []

    for i, task in enumerate(tasks, 1):
        if task.name is None:
            task.name = 'task_{}'.format(i)

        # merge both dictionaries (task parameters have higher precedence)
        kwargs_ = dict(**kwargs)
        kwargs_.update(task)

        model = TranslationModel(**kwargs_)
        self.models.append(model)
        self.ratios.append(task.ratio if task.ratio is not None else 1)

    self.main_model = self.models[0]
    self.ratios = [ratio / sum(self.ratios) for ratio in self.ratios]  # unit normalization
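# Worked example (illustrative, standalone): mirrors the ratio normalization
# above on plain numbers. Tasks without an explicit name are auto-named
# 'task_1', 'task_2', ... by position.
def _normalize_ratios_demo():
    ratios = [3, 1, 1]  # per-task ratios, defaulting to 1 when unspecified
    return [r / sum(ratios) for r in ratios]  # -> [0.6, 0.2, 0.2]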
def main(args=None):
    args = parser.parse_args(args)

    # read config file and default config
    with open('config/default.yaml') as f:
        default_config = utils.AttrDict(yaml.safe_load(f))
    with open(args.config) as f:
        config = utils.AttrDict(yaml.safe_load(f))

    if args.learning_rate is not None:
        args.reset_learning_rate = True

    # command-line parameters have higher precedence than config file
    for k, v in vars(args).items():
        if v is not None:
            config[k] = v

    # set default values for parameters that are not defined
    for k, v in default_config.items():
        config.setdefault(k, v)

    if config.score_function:
        config.score_functions = evaluation.name_mapping[config.score_function]

    if args.crash_test:
        config.max_train_size = 0

    if not config.debug:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # disable TensorFlow's debugging logs

    decoding_mode = any(arg is not None for arg in (args.decode, args.eval, args.align))

    # enforce parameter constraints
    assert config.steps_per_eval % config.steps_per_checkpoint == 0, (
        'steps-per-eval should be a multiple of steps-per-checkpoint')
    assert decoding_mode or args.train or args.save or args.save_embedding, (
        'you need to specify at least one action (decode, eval, align, train, '
        'save, or save-embedding)')
    assert not (args.average and args.ensemble), (
        'checkpoint averaging and ensembling are mutually exclusive')

    if args.train and args.purge:
        utils.log('deleting previous model')
        shutil.rmtree(config.model_dir, ignore_errors=True)

    os.makedirs(config.model_dir, exist_ok=True)

    # copy config file to model directory, patching `model_dir` on the way
    config_path = os.path.join(config.model_dir, 'config.yaml')
    if args.train and not os.path.exists(config_path):
        with open(args.config) as config_file, open(config_path, 'w') as dest_file:
            content = config_file.read()
            content = re.sub(r'model_dir:.*?\n', 'model_dir: {}\n'.format(config.model_dir),
                             content, flags=re.MULTILINE)
            dest_file.write(content)

    # also copy default config
    config_path = os.path.join(config.model_dir, 'default.yaml')
    if args.train and not os.path.exists(config_path):
        shutil.copy('config/default.yaml', config_path)

    # copy source code to model directory
    tar_path = os.path.join(config.model_dir, 'code.tar.gz')
    if args.train and not os.path.exists(tar_path):
        with tarfile.open(tar_path, 'w:gz') as tar:
            for filename in os.listdir('translate'):
                if filename.endswith('.py'):
                    tar.add(os.path.join('translate', filename), arcname=filename)

    logging_level = logging.DEBUG if args.verbose else logging.INFO
    # always log to stdout in decoding and eval modes (to avoid overwriting precious train logs)
    log_path = os.path.join(config.model_dir, config.log_file)
    logger = utils.create_logger(log_path if args.train else None)
    logger.setLevel(logging_level)

    utils.log('label: {}'.format(config.label))
    utils.log('description:\n {}'.format('\n '.join(config.description.strip().split('\n'))))

    utils.log(' '.join(sys.argv))  # print command line
    try:  # print git hash
        commit_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
        utils.log('commit hash {}'.format(commit_hash))
    except:
        pass

    utils.log('tensorflow version: {}'.format(tf.__version__))

    # log parameters
    utils.debug('program arguments')
    for k, v in sorted(config.items(), key=itemgetter(0)):
        utils.debug(' {:<20} {}'.format(k, pformat(v)))

    if isinstance(config.dev_prefix, str):
        config.dev_prefix = [config.dev_prefix]

    if config.tasks is not None:
        config.tasks = [utils.AttrDict(task) for task in config.tasks]
        tasks = config.tasks
    else:
        tasks = [config]

    for task in tasks:
        for parameter, value in config.items():
            task.setdefault(parameter, value)

        task.encoders = [utils.AttrDict(encoder) for encoder in task.encoders]
        task.decoders = [utils.AttrDict(decoder) for decoder in task.decoders]

        for encoder_or_decoder in task.encoders + task.decoders:
            for parameter, value in task.items():
                encoder_or_decoder.setdefault(parameter, value)

        if args.max_len:
            args.max_input_len = args.max_len
        if args.max_output_len:  # override decoder's max len
            task.decoders[0].max_len = args.max_output_len
        if args.max_input_len:  # override encoder's max len
            task.encoders[0].max_len = args.max_input_len

    config.checkpoint_dir = os.path.join(config.model_dir, 'checkpoints')

    # set random seeds
    if config.seed is None:
        config.seed = random.randrange(sys.maxsize)
    if config.tf_seed is None:
        config.tf_seed = random.randrange(sys.maxsize)
    utils.log('python random seed: {}'.format(config.seed))
    utils.log('tf random seed: {}'.format(config.tf_seed))
    random.seed(config.seed)
    tf.set_random_seed(config.tf_seed)

    device = None
    if config.no_gpu:
        device = '/cpu:0'
        device_id = None
    elif config.gpu_id is not None:
        device = '/gpu:{}'.format(config.gpu_id)
        device_id = config.gpu_id
    else:
        device_id = 0

    # hide other GPUs so that TensorFlow won't use memory on them
    os.environ['CUDA_VISIBLE_DEVICES'] = '' if device_id is None else str(device_id)

    utils.log('creating model')
    utils.log('using device: {}'.format(device))

    with tf.device(device):
        if config.weight_scale:
            if config.initializer == 'uniform':
                initializer = tf.random_uniform_initializer(minval=-config.weight_scale,
                                                            maxval=config.weight_scale)
            else:
                initializer = tf.random_normal_initializer(stddev=config.weight_scale)
        else:
            initializer = None

        tf.get_variable_scope().set_initializer(initializer)

        # exempt the model from creating gradient ops when only decoding
        config.decode_only = decoding_mode

        if config.tasks is not None:
            model = MultiTaskModel(**config)
        else:
            model = TranslationModel(**config)

    # count parameters, skipping those created by the training algorithm (e.g. Adam)
    variables = [var for var in tf.global_variables() if not var.name.startswith('gradients')]
    utils.log('model parameters ({})'.format(len(variables)))
    parameter_count = 0
    for var in sorted(variables, key=lambda var: var.name):
        utils.log(' {} {}'.format(var.name, var.get_shape()))
        v = 1
        for d in var.get_shape():
            v *= d.value
        parameter_count += v
    utils.log('number of parameters: {:.2f}M'.format(parameter_count / 1e6))

    tf_config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
    tf_config.gpu_options.allow_growth = config.allow_growth
    tf_config.gpu_options.per_process_gpu_memory_fraction = config.mem_fraction

    def average_checkpoints(main_sess, sessions):
        # assign to each variable the element-wise average of its values
        # across all checkpoint sessions
        for var in tf.global_variables():
            avg_value = sum(sess.run(var) for sess in sessions) / len(sessions)
            main_sess.run(var.assign(avg_value))

    with tf.Session(config=tf_config) as sess:
        best_checkpoint = os.path.join(config.checkpoint_dir, 'best')

        params = {'variable_mapping': config.variable_mapping,
                  'reverse_mapping': config.reverse_mapping,
                  'rnn_lm_model_dir': None, 'rnn_mt_model_dir': None,
                  'rnn_lm_cell_name': None, 'origin_model_ckpt': None}

        if config.ensemble and len(config.checkpoints) > 1:
            model.initialize(config.checkpoints, **params)
        elif config.average and len(config.checkpoints) > 1:
            model.initialize(reset=True)
            sessions = [tf.Session(config=tf_config) for _ in config.checkpoints]
            for sess_, checkpoint in zip(sessions, config.checkpoints):
                model.initialize(sess=sess_, checkpoints=[checkpoint], **params)
            average_checkpoints(sess, sessions)
        elif (not config.checkpoints and decoding_mode and
              os.path.isfile(best_checkpoint + '.index')):
            # in decoding and evaluation mode, unless specified otherwise (by `checkpoints`),
            # try to load the best checkpoint
            model.initialize([best_checkpoint], **params)
        else:
            # load the last checkpoint, unless `reset` is true
            model.initialize(**config)

        if config.output is not None:
            dirname = os.path.dirname(config.output)
            if dirname:
                os.makedirs(dirname, exist_ok=True)

        try:
            if args.save:
                model.save()
            elif args.save_embedding:
                if config.embedding_output_dir is None:
                    output_dir = '.'
                else:
                    output_dir = config.embedding_output_dir
                model.save_embedding(output_dir)
            elif args.decode is not None:
                if config.align is not None:
                    config.align = True
                model.decode(**config)
            elif args.eval is not None:
                model.evaluate(on_dev=False, **config)
            elif args.align is not None:
                model.align(**config)
            elif args.train:
                model.train(**config)
        except KeyboardInterrupt:
            sys.exit()
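# Illustrative sketch only: the same element-wise averaging that
# `average_checkpoints` performs through TF sessions, shown on plain NumPy
# arrays so the arithmetic is easy to verify.
def _average_checkpoints_demo():
    import numpy as np
    # each "checkpoint" maps variable names to values
    checkpoints = [{'w': np.array([1.0, 2.0])},
                   {'w': np.array([3.0, 4.0])}]
    return {name: sum(ckpt[name] for ckpt in checkpoints) / len(checkpoints)
            for name in checkpoints[0]}  # -> {'w': array([2., 3.])}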
def main(args=None):
    args = parser.parse_args(args)

    # read config file and default config
    with open('config/default.yaml') as f:
        default_config = utils.AttrDict(yaml.safe_load(f))
    with open(args.config) as f:
        config = utils.AttrDict(yaml.safe_load(f))

    if args.learning_rate is not None:
        args.reset_learning_rate = True

    # command-line parameters have higher precedence than config file
    for k, v in vars(args).items():
        if v is not None:
            config[k] = v

    # set default values for parameters that are not defined
    for k, v in default_config.items():
        config.setdefault(k, v)

    # enforce parameter constraints
    assert config.steps_per_eval % config.steps_per_checkpoint == 0, (
        'steps-per-eval should be a multiple of steps-per-checkpoint')
    assert args.decode is not None or args.eval or args.train or args.align, (
        'you need to specify at least one action (decode, eval, align, or train)')
    assert not (args.avg_checkpoints and args.ensemble), (
        'checkpoint averaging and ensembling are mutually exclusive')

    if args.purge:
        utils.log('deleting previous model')
        shutil.rmtree(config.model_dir, ignore_errors=True)

    os.makedirs(config.model_dir, exist_ok=True)

    # copy config file to model directory
    config_path = os.path.join(config.model_dir, 'config.yaml')
    if not os.path.exists(config_path):
        shutil.copy(args.config, config_path)

    # also copy default config
    config_path = os.path.join(config.model_dir, 'default.yaml')
    if not os.path.exists(config_path):
        shutil.copy('config/default.yaml', config_path)

    # copy source code to model directory
    tar_path = os.path.join(config.model_dir, 'code.tar.gz')
    if not os.path.exists(tar_path):
        with tarfile.open(tar_path, 'w:gz') as tar:
            for filename in os.listdir('translate'):
                if filename.endswith('.py'):
                    tar.add(os.path.join('translate', filename), arcname=filename)

    logging_level = logging.DEBUG if args.verbose else logging.INFO
    # always log to stdout in decoding and eval modes (to avoid overwriting precious train logs)
    log_path = os.path.join(config.model_dir, config.log_file)
    logger = utils.create_logger(log_path if args.train else None)
    logger.setLevel(logging_level)

    utils.log('label: {}'.format(config.label))
    utils.log('description:\n {}'.format('\n '.join(config.description.strip().split('\n'))))

    utils.log(' '.join(sys.argv))  # print command line
    try:  # print git hash
        commit_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
        utils.log('commit hash {}'.format(commit_hash))
    except:
        pass

    utils.log('tensorflow version: {}'.format(tf.__version__))

    # log parameters
    utils.debug('program arguments')
    for k, v in sorted(config.items(), key=itemgetter(0)):
        utils.debug(' {:<20} {}'.format(k, pformat(v)))

    if isinstance(config.dev_prefix, str):
        config.dev_prefix = [config.dev_prefix]

    if config.tasks is not None:
        config.tasks = [utils.AttrDict(task) for task in config.tasks]
        tasks = config.tasks
    else:
        tasks = [config]

    for task in tasks:
        for parameter, value in config.items():
            task.setdefault(parameter, value)

        task.encoders = [utils.AttrDict(encoder) for encoder in task.encoders]
        task.decoders = [utils.AttrDict(decoder) for decoder in task.decoders]

        for encoder_or_decoder in task.encoders + task.decoders:
            for parameter, value in task.items():
                encoder_or_decoder.setdefault(parameter, value)

    device = None
    if config.no_gpu:
        device = '/cpu:0'
    elif config.gpu_id is not None:
        device = '/gpu:{}'.format(config.gpu_id)

    utils.log('creating model')
    utils.log('using device: {}'.format(device))

    with tf.device(device):
        config.checkpoint_dir = os.path.join(config.model_dir, 'checkpoints')

        if config.weight_scale:
            if config.initializer == 'uniform':
                initializer = tf.random_uniform_initializer(minval=-config.weight_scale,
                                                            maxval=config.weight_scale)
            else:
                initializer = tf.random_normal_initializer(stddev=config.weight_scale)
        else:
            initializer = None

        tf.get_variable_scope().set_initializer(initializer)

        # exempt the model from creating gradient ops when only decoding
        config.decode_only = args.decode is not None or args.eval or args.align

        if config.tasks is not None:
            model = MultiTaskModel(**config)
        else:
            model = TranslationModel(**config)

    # count parameters
    utils.log('model parameters ({})'.format(len(tf.global_variables())))
    parameter_count = 0
    for var in tf.global_variables():
        utils.log(' {} {}'.format(var.name, var.get_shape()))
        if not var.name.startswith('gradients'):
            # not counting parameters created by the training algorithm (e.g. Adam)
            v = 1
            for d in var.get_shape():
                v *= d.value
            parameter_count += v
    utils.log('number of parameters: {:.2f}M'.format(parameter_count / 1e6))

    tf_config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
    tf_config.gpu_options.allow_growth = config.allow_growth
    tf_config.gpu_options.per_process_gpu_memory_fraction = config.mem_fraction

    def average_checkpoints(main_sess, sessions):
        # assign to each variable the element-wise average of its values
        # across all checkpoint sessions
        for var in tf.global_variables():
            avg_value = sum(sess.run(var) for sess in sessions) / len(sessions)
            main_sess.run(var.assign(avg_value))

    with tf.Session(config=tf_config) as sess:
        best_checkpoint = os.path.join(config.checkpoint_dir, 'best')

        if ((config.ensemble or config.avg_checkpoints) and
                (args.eval or args.decode is not None) and len(config.checkpoints) > 1):
            # create one session for each model in the ensemble
            sessions = [tf.Session() for _ in config.checkpoints]
            for sess_, checkpoint in zip(sessions, config.checkpoints):
                model.initialize(sess_, [checkpoint])

            if config.ensemble:
                sess = sessions
            else:
                sess = sessions[0]
                average_checkpoints(sess, sessions)
        elif (not config.checkpoints and (args.eval or args.decode is not None or args.align) and
              os.path.isfile(best_checkpoint + '.index')):
            # in decoding and evaluation mode, unless specified otherwise (by `checkpoints`),
            # try to load the best checkpoint
            model.initialize(sess, [best_checkpoint])
        else:
            # load the last checkpoint, unless `reset` is true
            model.initialize(sess, **config)

        if args.decode is not None:
            model.decode(sess, **config)
        elif args.eval:
            model.evaluate(sess, on_dev=False, **config)
        elif args.align:
            model.align(sess, **config)
        elif args.train:
            try:
                model.train(sess=sess, **config)
            except (KeyboardInterrupt, utils.FinishedTrainingException):
                utils.log('exiting...')
                model.save(sess)
                sys.exit()
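# Illustrative sketch only: the same arithmetic as the parameter-count loop
# above, on plain shape tuples instead of TF variables. For example, shapes
# (512, 1024) and (1024,) give 524288 + 1024 = 525312 parameters (~0.53M).
def _parameter_count_demo(shapes=((512, 1024), (1024,))):
    total = 0
    for shape in shapes:
        n = 1
        for d in shape:
            n *= d  # multiply the dimensions together
        total += n
    return total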