def __init__(self, infer_func, param_path, place=None, parallel=False):
    """
    :param infer_func: a function that will return the predict Variable
    :param param_path: the path where the inference model is saved by
        fluid.io.save_params
    :param place: place to do the inference
    :param parallel: if True, use parallel_executor to run the inference;
        it will use multiple CPUs/GPUs.
    """
    self.param_path = param_path
    self.scope = core.Scope()
    self.parallel = parallel
    self.place = check_and_get_place(place)

    # Build the inference program from the user-supplied network function.
    self.inference_program = framework.Program()
    with framework.program_guard(self.inference_program):
        with unique_name.guard():
            self.predict_var = infer_func()

    with self._prog_and_scope_guard():
        # load params from param_path into scope
        io.load_params(executor.Executor(self.place), param_path)

    if parallel:
        with self._prog_and_scope_guard():
            self.exe = parallel_executor.ParallelExecutor(
                use_cuda=isinstance(self.place, core.CUDAPlace),
                loss_name=self.predict_var.name)
    else:
        self.exe = executor.Executor(self.place)
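# Usage sketch for the constructor above, assuming the legacy paddle.fluid
# release that exposed fluid.Inferencer, fluid.layers, and fluid.CPUPlace.
# `inference_program` and the "saved_params" directory are illustrative
# placeholders, not names taken from the original source.
import numpy
import paddle.fluid as fluid

def inference_program():
    # a network function that returns the predict Variable expected by infer_func
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    return fluid.layers.fc(input=x, size=1, act=None)

inferencer = fluid.Inferencer(
    infer_func=inference_program,
    param_path="saved_params",  # directory written by fluid.io.save_params
    place=fluid.CPUPlace())

results = inferencer.infer(
    {'x': numpy.random.random((1, 13)).astype('float32')})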
def __init__(self, program_func, optimizer, param_path=None, place=None):
    # 1. we need to generate a framework.Program by calling
    # program_func. Reference: fluid.program_guard in
    # test_word2vec.py
    self.scope = core.Scope()

    self.startup_program = framework.Program()
    self.train_program = framework.Program()

    with framework.program_guard(self.train_program, self.startup_program):
        loss = program_func()
        if not isinstance(optimizer, opt_module.Optimizer):
            raise TypeError(
                "The optimizer should be an instance of Optimizer")

        optimize_ops, params_grads = optimizer.minimize(loss)

    self.place = Trainer._check_and_get_place(place)

    self.dist_transpile_if_necessary(optimize_ops, params_grads)

    # 2. move the default_main_program to self.program and run the
    # default_startup program on an empty core.Scope()
    # Run startup program
    with self._prog_and_scope_guard():
        # use the validated place; the raw `place` argument may be None
        exe = executor.Executor(self.place)
        exe.run(self.startup_program)

    if param_path:
        # load params from param_path into scope
        io.load_persistables(exe, dirname=param_path)
def __init__(self,
             train_func,
             optimizer,
             param_path=None,
             place=None,
             parallel=False):
    self.__stop = False
    self.parallel = parallel

    # 1. we need to generate a framework.Program by calling
    # train_func. Reference: fluid.program_guard in
    # test_word2vec.py
    if not isinstance(optimizer, opt_module.Optimizer):
        raise TypeError("The optimizer should be an instance of Optimizer")

    self.scope = core.Scope()

    self.startup_program = framework.Program()
    self.train_program = framework.Program()

    with framework.program_guard(self.train_program, self.startup_program):
        program_func_outs = train_func()
        self.train_func_outputs = program_func_outs if isinstance(
            program_func_outs, list) else [program_func_outs]
        self.test_program = self.train_program.clone()

        # The first element of program_func_outs is loss.
        loss = self.train_func_outputs[0]
        optimize_ops, params_grads = optimizer.minimize(loss)

    self.place = check_and_get_place(place)

    self._dist_transpile_if_necessary(optimize_ops, params_grads)

    # 2. move the default_main_program to self.program and run the
    # default_startup program on an empty core.Scope()
    # Run startup program
    with self._prog_and_scope_guard():
        # use the validated place; the raw `place` argument may be None
        exe = executor.Executor(self.place)
        exe.run(self.startup_program)

    if param_path:
        # load params from param_path into scope
        io.load_persistables(exe, dirname=param_path)
def __init__(self, network_func, param_path=None, place=None):
    # 1. we need to generate a framework.Program by calling
    #    network_func. Reference: fluid.program_guard in test_word2vec.py
    # 2. move the default_main_program to self.program.
    # 3. run the default_startup program.
    # 4. load params from param_path into scope
    self.scope = core.Scope()
    self.place = place
    self.startup_program = framework.Program()
    # TODO: generate the startup_program with network_func

    exe = executor.Executor(place)
    exe.run(self.startup_program, scope=self.scope)

    if param_path:
        # load params from param_path into scope
        io.load_persistables(exe, dirname=param_path)
def __init__(self, infer_func, param_path, place=None, parallel=False):
    self.param_path = param_path
    self.scope = core.Scope()
    self.parallel = parallel
    self.place = check_and_get_place(place)

    self.inference_program = framework.Program()
    with framework.program_guard(self.inference_program):
        with unique_name.guard():
            self.predict_var = infer_func()

    with self._prog_and_scope_guard():
        # load params from param_path into scope
        io.load_params(executor.Executor(self.place), param_path)

    if parallel:
        with self._prog_and_scope_guard():
            self.exe = parallel_executor.ParallelExecutor(
                use_cuda=isinstance(self.place, core.CUDAPlace),
                loss_name=self.predict_var.name)
    else:
        self.exe = executor.Executor(self.place)

    self.inference_program = self.inference_program.clone(for_test=True)
def __init__(self,
             train_func,
             optimizer_func,
             param_path=None,
             place=None,
             parallel=False,
             checkpoint_config=None):
    self.__stop = False
    self.parallel = parallel

    # config for checkpoint
    # only the chief worker will save variables
    self.trainer_id = 0
    self.checkpoint_cfg = checkpoint_config
    if self.checkpoint_cfg:
        assert isinstance(self.checkpoint_cfg, CheckpointConfig)
        serial = io.get_latest_checkpoint_serial(
            self.checkpoint_cfg.checkpoint_dir)
        self.checkpoint_cfg.load_serial = serial if serial >= 0 else None

    self.scope = core.Scope()

    # 1. we need to generate a framework.Program by calling
    # train_func. Reference: fluid.program_guard in
    # test_word2vec.py
    self.startup_program = framework.Program()
    self.train_program = framework.Program()

    with framework.program_guard(self.train_program, self.startup_program):
        program_func_outs = train_func()
        self.train_func_outputs = program_func_outs if isinstance(
            program_func_outs, list) else [program_func_outs]
        self.test_program = self.train_program.clone(for_test=True)

        # The first element of program_func_outs is loss.
        loss = self.train_func_outputs[0]

        optimizer = optimizer_func()
        if not isinstance(optimizer, opt_module.Optimizer):
            raise TypeError(
                "The optimizer should be an instance of Optimizer")
        optimize_ops, params_grads = optimizer.minimize(loss)

    self.place = check_and_get_place(place)

    self._dist_transpile_if_necessary(optimize_ops, params_grads)

    # 2. move the default_main_program to self.program and run the
    # default_startup program on an empty core.Scope()
    # Run startup program
    with self._prog_and_scope_guard():
        # use the validated place; the raw `place` argument may be None
        exe = executor.Executor(self.place)
        exe.run(self.startup_program)

    # restore from the latest checkpoint, if one was found above
    # (compare against None so that serial 0 is not skipped)
    if self.checkpoint_cfg and self.checkpoint_cfg.load_serial is not None:
        with self._prog_and_scope_guard():
            exe = executor.Executor(self.place)
            io.load_checkpoint(exe, self.checkpoint_cfg.checkpoint_dir,
                               self.checkpoint_cfg.load_serial,
                               self.startup_program)

        if not self.checkpoint_cfg.is_pserver:
            epoch_id, step_id = io.load_trainer_args(
                self.checkpoint_cfg.checkpoint_dir,
                self.checkpoint_cfg.load_serial, self.trainer_id,
                self._get_checkpoint_load_args())
            self.checkpoint_cfg.epoch_id = int(epoch_id)
            self.checkpoint_cfg.step_id = int(step_id)

    if param_path and os.path.isdir(param_path):
        # load params from param_path into scope
        io.load_persist_vars_without_grad(
            exe, dirname=param_path, program=self.startup_program)
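# Usage sketch for the constructor above, assuming the legacy paddle.fluid
# release that exposed fluid.Trainer and fluid.CheckpointConfig. The names
# train_program, optimizer_program, random_reader, and the "checkpoints"
# directory are illustrative placeholders.
import numpy
import paddle
import paddle.fluid as fluid

def train_program():
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1, act=None)
    cost = fluid.layers.square_error_cost(input=y_predict, label=y)
    return fluid.layers.mean(cost)  # the first output must be the loss

def optimizer_program():
    return fluid.optimizer.SGD(learning_rate=0.01)

def random_reader():
    # synthetic data only, so the sketch stays self-contained
    for _ in range(64):
        yield (numpy.random.random(13).astype('float32'),
               numpy.random.random(1).astype('float32'))

trainer = fluid.Trainer(
    train_func=train_program,
    optimizer_func=optimizer_program,
    place=fluid.CPUPlace(),
    checkpoint_config=fluid.CheckpointConfig("checkpoints"))

trainer.train(
    num_epochs=1,
    event_handler=lambda event: None,
    reader=paddle.batch(random_reader, batch_size=8),
    feed_order=['x', 'y'])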
def do_POST(self):
    """ Handle POST requests """
    if self.path == "/update" or self.path == "/run":
        # Git webhook for running SAAD on a repo
        if self.headers.get('Content-Type') != 'application/json':
            return self.write_json_problem_details(
                HTTPStatus.UNSUPPORTED_MEDIA_TYPE,
                "{\"title\": \"Invalid Content-Type\","
                "\"detail\": \"Expected request to have Content-Type "
                "application/json (got " + self.headers.get('Content-Type') +
                ")\"}")

        content_length = int(self.headers['Content-Length'])
        body = self.rfile.read(content_length)
        try:
            params = json.loads(body.decode('utf-8'))
        except ValueError:
            return self.write_json_problem_details(
                HTTPStatus.BAD_REQUEST,
                "{\"title\": \"Invalid JSON data\","
                "\"detail\": \"Unable to load given JSON data\"}")

        try:
            ref = params['ref']
            previous_commit = params['before']
            current_commit = params['after']
            clone_url = params['repository']['clone_url']
        except KeyError:
            return self.write_json_problem_details(
                HTTPStatus.BAD_REQUEST,
                "{\"title\": \"Invalid JSON data\","
                "\"detail\": \"Given JSON data missing expected field(s)\"}")

        if self.path == "/update":
            # Auto-update endpoint
            if clone_url == SERVER_REPO_URL:
                self.send_response(HTTPStatus.OK)
                self.send_header('Content-Type', 'text/plain')
                self.end_headers()
                self.wfile.write("Updating...\n".encode())
                # TODO make sure response is sent?
                new_args = [
                    sys.argv[0], '--clone_url', clone_url,
                    '--current_commit', current_commit,
                    '--previous_commit', previous_commit
                ]
                return threading.Thread(
                    target=update_self, args=(httpd, new_args)).start()
            else:
                return self.write_json_problem_details(
                    HTTPStatus.UNPROCESSABLE_ENTITY,
                    "{\"title\": \"Invalid repo URL\","
                    "\"detail\": \"Will not update as the provided repo <" +
                    clone_url + "> is not the expected server repo\"}")
        elif self.path == "/run":
            # Run endpoint
            repo_name = check_repo_url(clone_url)
            if repo_name:
                self.send_response(HTTPStatus.OK)
                self.send_header('Content-Type', 'text/plain')
                self.end_headers()
                self.wfile.write("Running...\n".encode())
                serverRepo.child_repos[repo_name].commits.pop('current', None)
                return serverRepo.child_repos[repo_name].run_all_probes(
                    current_commit, previous_commit)
            else:
                return self.write_json_problem_details(
                    HTTPStatus.UNPROCESSABLE_ENTITY,
                    "{\"title\": \"Invalid repo URL\","
                    "\"detail\": \"Provided repo <" + clone_url +
                    "> is not tracked.\"}")
    elif self.path == "/run/module":  # TODO API?
        # Runs a probe with the given input data
        if self.handle_auth():
            content_length = int(self.headers['Content-Length'])
            body = self.rfile.read(content_length)
            data = {}
            for value in body.decode().split("&"):
                data.update({value.split("=", 1)[0]: value.split("=", 1)[1]})
            module_name = data.pop("module_name")
            repo_name = data.pop("repo_name")
            repo = serverRepo
            if repo_name in serverRepo.child_repos:
                repo = serverRepo.child_repos[repo_name]
            repo.modules[module_name].run_probe(data, core.Scope({}), repo)
            return
    else:
        self.send_response(HTTPStatus.NOT_FOUND)
        self.end_headers()
        return
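# A hedged client-side sketch of the POST body that the "/run" branch above
# parses; the host, port, and clone_url values are illustrative only. The
# payload mirrors the fields the handler reads from a Git push webhook:
# ref, before, after, and repository.clone_url.
import json
import urllib.request

payload = {
    "ref": "refs/heads/master",
    "before": "0000000000000000000000000000000000000000",
    "after": "1111111111111111111111111111111111111111",
    "repository": {"clone_url": "https://example.com/tracked/repo.git"},
}

request = urllib.request.Request(
    "http://localhost:8000/run",
    data=json.dumps(payload).encode('utf-8'),
    headers={"Content-Type": "application/json"},  # anything else yields 415
    method="POST")

with urllib.request.urlopen(request) as response:
    print(response.read().decode())  # "Running...\n" when the repo is tracked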