def builder(self, *params):
    """Build a model from the JSON generated for the given parameter values.

    *params* are positional hyperparameter values; they are forwarded to
    ``self._json`` to produce the model JSON.

    Returns:
        A ``ModelFromJsonTF`` instance, or ``None`` if construction failed
        (the failing parameter set is reported on stdout).
    """
    try:
        return ModelFromJsonTF(None, json_str=self._json(*params))
    except Exception:
        # Was a bare `except:` — narrowed to Exception so that
        # KeyboardInterrupt/SystemExit still propagate instead of being
        # silently converted into a None model.
        str_param = ','.join('{0}={1!r}'.format(k, v)
                             for k, v in self._args(*params).items())
        print("Failed to build model with params: {}".format(str_param))
        return None
# NOTE(review): this physical line is a whitespace-mangled fragment. It begins
# mid-call — the tail of a `K.tf.GPUOptions(...)` invocation whose opening and
# assignment are not visible in this chunk — and, as written on one line, the
# inline "#was 0.0" comment swallows everything after it. Left byte-identical
# rather than reconstructed, since restoring the original multi-line layout
# would require guessing the missing prefix. Logically it appears to: build
# GPUOptions (zero memory fraction when hide_device), open a TF session with
# soft placement, pick ModelFromJsonTF vs ModelFromJson based on args.tf, and
# set THEANO_FLAGS / CUDA_LAUNCH_BLOCKING for profiling — TODO confirm against
# the full file.
per_process_gpu_memory_fraction=0.1, #was 0.0 allow_growth=True, visible_device_list=device[-1] if 'gpu' in device else '') if hide_device: gpu_options = K.tf.GPUOptions( per_process_gpu_memory_fraction=0.0, allow_growth=True, ) K.set_session( K.tf.Session( config=K.tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_options))) if args.tf: model_builder = ModelFromJsonTF(comm, args.model_json, device_name=device, weights=args.model_weights) print("Process {0} using device {1}".format( comm.Get_rank(), model_builder.device)) else: model_builder = ModelFromJson(comm, args.model_json, weights=args.model_weights) print("Process {0} using device {1}".format( comm.Get_rank(), device)) os.environ[ 'THEANO_FLAGS'] = "profile=%s,device=%s,floatX=float32" % ( args.profile, device.replace('gpu', 'cuda')) # GPU ops need to be executed synchronously in order for profiling to make sense if args.profile: os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
def builder(self, *params):
    """Construct a ``ModelFromJsonTF`` for one hyperparameter assignment.

    The positional values in *params* are matched, in order, against the
    names of ``self.parameters`` and passed as keyword arguments to
    ``self.model_fn``, whose JSON output defines the model.
    """
    names = (p.name for p in self.parameters)
    kwargs = dict(zip(names, params))
    return ModelFromJsonTF(None, json_str=self.model_fn(**kwargs))