def session_config(params):
    """Build a ``tf.ConfigProto`` for session creation.

    Enables L1 graph optimization with function inlining and soft device
    placement.  Under distributed training the visible GPU is pinned to the
    process's local rank; otherwise ``params.device_list`` (if non-empty)
    selects the visible GPUs.

    Args:
        params: hyper-parameter object; only ``device_list`` is read here.

    Returns:
        A configured ``tf.ConfigProto``.
    """
    opt = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1,
                              do_function_inlining=True)
    config = tf.ConfigProto(
        allow_soft_placement=True,
        graph_options=tf.GraphOptions(optimizer_options=opt))

    if distribute.is_distributed_training_mode():
        # One GPU per process: expose only the GPU matching the local rank.
        config.gpu_options.visible_device_list = str(distribute.local_rank())
    elif params.device_list:
        config.gpu_options.visible_device_list = ",".join(
            str(device) for device in params.device_list)
    return config
def session_config(params):
    """Build a ``tf.ConfigProto``, auto-detecting GPUs when none are given.

    Enables L1 graph optimization with function inlining and soft device
    placement.  If the caller did not specify ``params.device_list`` and more
    than one local GPU is present, ``params.device_list`` is populated with
    all detected GPU indices (note: this mutates ``params``).  Under
    distributed training the visible GPU is pinned to the local rank.

    Args:
        params: hyper-parameter object; ``device_list`` is read and may be
            written (auto-detection side effect).

    Returns:
        A configured ``tf.ConfigProto``.
    """
    optimizer_options = tf.OptimizerOptions(
        opt_level=tf.OptimizerOptions.L1, do_function_inlining=True)
    graph_options = tf.GraphOptions(optimizer_options=optimizer_options)
    config = tf.ConfigProto(allow_soft_placement=True,
                            graph_options=graph_options)

    # Bug fix: only auto-detect when the caller left device_list empty.
    # The previous code overwrote an explicitly configured device_list
    # whenever >1 GPU was present, clobbering user configuration.
    if not params.device_list:
        # NOTE(review): list_local_devices() initializes all visible GPUs
        # before visible_device_list is set — presumably acceptable here;
        # confirm if per-process GPU isolation matters.
        from tensorflow.python.client import device_lib
        n_gpus = sum(1 for d in device_lib.list_local_devices()
                     if d.device_type == 'GPU')
        if n_gpus > 1:
            params.device_list = list(range(n_gpus))

    if distribute.is_distributed_training_mode():
        # One GPU per process: expose only the GPU matching the local rank.
        config.gpu_options.visible_device_list = str(distribute.local_rank())
    elif params.device_list:
        config.gpu_options.visible_device_list = ",".join(
            str(i) for i in params.device_list)
    return config