def remove_arc_to(self, path):
    """Delete the dependency arc pointing from this object to ``path``.

    Removes ``self.path`` from the target object's predecessor list and
    ``path`` from this object's successor list.  Raises ``ValueError``
    (from ``list.remove``) if the arc is not actually recorded.
    """
    target_config = metadata.ConfigFile(path + "/.chern/config.json")
    predecessors = target_config.read_variable("predecessors")
    predecessors.remove(self.path)
    target_config.write_variable("predecessors", predecessors)

    own_config = metadata.ConfigFile(self.path + "/.chern/config.json")
    successors = own_config.read_variable("successors")
    successors.remove(path)
    own_config.write_variable("successors", successors)
def remove_arc_from(self, path):
    """Delete the dependency arc pointing from ``path`` to this object.

    Mirror image of ``remove_arc_to``: removes ``self.path`` from the
    source object's successor list and ``path`` from this object's
    predecessor list.
    """
    source_config = metadata.ConfigFile(path + "/.chern/config.json")
    successors = source_config.read_variable("successors")
    successors.remove(self.path)
    source_config.write_variable("successors", successors)

    own_config = metadata.ConfigFile(self.path + "/.chern/config.json")
    predecessors = own_config.read_variable("predecessors")
    predecessors.remove(path)
    own_config.write_variable("predecessors", predecessors)
def projects(self):
    """Return the names of all registered projects.

    Reads the ``projects_path`` mapping from the local config file,
    defaulting to an empty mapping when none exists yet.
    """
    config = metadata.ConfigFile(self.local_config_path)
    registered = config.read_variable("projects_path", {})
    # Iterating a dict yields its keys, i.e. the project names.
    return list(registered)
def add_arc_to(self, path):
    """Create a dependency arc from this object to the object at ``path``.

    Appends ``self.path`` to the target's predecessor list and ``path``
    to this object's successor list, creating either list when it does
    not exist yet.
    """
    target_config = metadata.ConfigFile(path + "/.chern/config.json")
    predecessors = target_config.read_variable("predecessors")
    if predecessors is None:
        predecessors = []
    predecessors.append(self.path)
    target_config.write_variable("predecessors", predecessors)

    own_config = metadata.ConfigFile(self.path + "/.chern/config.json")
    successors = own_config.read_variable("successors")
    if successors is None:
        successors = []
    successors.append(path)
    own_config.write_variable("successors", successors)
def build(self):
    """Build the docker image and record its id.

    Runs ``docker build .`` inside this machine's run directory and
    stores the resulting image id in the run's ``status.json``.

    Raises:
        Exception: with docker's stderr text when the build fails.
    """
    run_path = os.path.join(self.path, self.machine_storage())
    os.chdir(run_path)
    ps = subprocess.Popen("docker build .", shell=True,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
    # communicate() drains both pipes while waiting.  The original
    # wait() + stderr.read() + communicate() sequence could deadlock
    # once docker filled a pipe buffer, and read stderr twice.
    stdout, stderr = ps.communicate()
    if ps.returncode != 0:
        raise Exception(stderr.decode())
    # Docker's last output token is the id of the freshly built image.
    image_id = stdout.split()[-1]
    status_file = metadata.ConfigFile(os.path.join(run_path, "status.json"))
    status_file.write_variable("image_id", image_id.decode())
def get_current_project(self):
    """ Get the name of the current working project.
    If there isn't a working project, return None
    """
    local_config_file = metadata.ConfigFile(self.local_config_path)
    current_project = local_config_file.read_variable(
        "current_project", None)
    if current_project is None:
        return None
    else:
        projects_path = local_config_file.read_variable("projects_path")
        # "no_place|" is a sentinel for "project has no recorded path";
        # the "|" makes it an impossible filesystem path.
        path = projects_path.get(current_project, "no_place|")
        if path == "no_place|":
            # Record the sentinel so the entry exists before validation.
            projects_path[current_project] = "no_place|"
        if not os.path.exists(path):
            # Stale or sentinel path: drop this project and fall back to
            # any other registered one (or None), persist the repaired
            # state, then recurse to re-validate the new selection.
            projects_path.pop(current_project)
            if projects_path != {}:
                current_project = list(projects_path.keys())[0]
            else:
                current_project = None
            local_config_file.write_variable("current_project",
                                             current_project)
            local_config_file.write_variable("projects_path",
                                             projects_path)
            return self.get_current_project()
        else:
            return current_project
def parameters(self):
    """Read the task parameters.

    Returns a tuple ``(sorted_names, mapping)`` where ``mapping`` is the
    ``parameters`` dict from ``contents/parameters.json`` (empty when
    the variable is missing).
    """
    config = metadata.ConfigFile(self.path + "/contents/parameters.json")
    mapping = config.read_variable("parameters", {})
    names = sorted(mapping.keys())
    return names, mapping
def storage(self):
    """Return the first ``run.*``/``raw.*`` directory whose status is
    "done", or ``""`` when none has finished."""
    for entry in csys.list_dir(self.path):
        if not entry.startswith(("run.", "raw.")):
            continue
        status_file = metadata.ConfigFile(
            os.path.join(self.path, entry, "status.json"))
        if status_file.read_variable("status", "submitted") == "done":
            return entry
    return ""
def image_id(self):
    """Return the image id recorded by the first built ``run.*``
    directory, or ``""`` when no build has completed."""
    for entry in csys.list_dir(self.path):
        if not entry.startswith("run."):
            continue
        status_file = metadata.ConfigFile(
            os.path.join(self.path, entry, "status.json"))
        if status_file.read_variable("status", "submitted") == "built":
            return status_file.read_variable("image_id")
    return ""
def feed(impression, path):
    """Inject local data into an impression's storage.

    Copies ``path`` into a fresh ``raw.<uuid>`` run directory under the
    impression's storage and marks that run "done", so downstream tasks
    treat it as finished output.
    """
    dst = os.path.join(os.environ["HOME"], ".ChernMachine/Storage",
                       impression)
    print(dst)
    if not csys.exists(dst):
        print("Impression {} does not exists.".format(impression))
        return
    # "raw." prefix distinguishes hand-fed data from machine runs.
    raw_run = "raw." + uuid.uuid4().hex
    output_dir = os.path.join(dst, raw_run, "output")
    print(path, output_dir)
    csys.copy_tree(path, output_dir)
    status_file = metadata.ConfigFile(os.path.join(dst, raw_run,
                                                   "status.json"))
    status_file.write_variable("status", "done")
def execute():
    """Dispatch submitted jobs whose dependencies are satisfied.

    Skips dispatching entirely while more than three jobs are running.
    For each satisfied submitted job, spawns ``chern_machine execute``
    and waits until the job leaves the "submitted" state before moving
    on to the next one.
    """
    import time  # local import: keeps the file-level imports untouched

    running_jobs = cherndb.jobs("running")
    # Simple throttle: leave new submissions queued when busy.
    if len(running_jobs) > 3:
        return
    waiting_jobs = cherndb.jobs("submitted")
    for job in waiting_jobs:
        print("Running {0}".format(job), file=sys.stderr)
        if job.satisfied():
            print("chern_machine execute {}".format(job.path),
                  file=sys.stderr)
            # FIXME Make sure the job will not be executed many times
            subprocess.Popen("chern_machine execute {}".format(job.path),
                             shell=True)
            # Poll with a short sleep instead of the original busy-wait
            # (`while ...: pass`), which pinned a CPU core.
            while job.status() == "submitted":
                time.sleep(0.1)
def execute(self):
    """Prepare the run directory and build the image.

    Copies ``contents`` into this machine's run directory, writes a
    trivial ``entrypoint.sh``, then builds the image.  The run status
    moves "building" -> "built", or "failed" (with the error appended)
    when the build raises.
    """
    run_path = os.path.join(self.path, self.machine_storage())
    csys.copy_tree(os.path.join(self.path, "contents"), run_path)
    status_file = metadata.ConfigFile(os.path.join(run_path, "status.json"))
    status_file.write_variable("status", "building")
    # Context manager guarantees the script is flushed and closed even
    # if the write fails (original leaked the handle on error).
    with open(os.path.join(run_path, "entrypoint.sh"), "w") as entrypoint:
        entrypoint.write("""#!/bin/bash\n$@\n""")
    try:
        self.build()
    except Exception as e:
        self.append_error("Fail to build the image!\n" + str(e))
        status_file.write_variable("status", "failed")
        raise e
    status_file.write_variable("status", "built")
def start(self):
    """Start the docker container attached and capture its output.

    Records the docker client's pid in the run's ``status.json``, writes
    the combined stdout/stderr of the container to the run directory's
    ``stdout`` file, and returns True iff docker exited with code 0.
    """
    ps = subprocess.Popen(
        "docker start -a {0}".format(self.container_id()),
        shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    run_path = os.path.join(self.path, self.machine_storage())
    config_file = metadata.ConfigFile(os.path.join(run_path, "status.json"))
    config_file.write_variable("docker_run_pid", ps.pid)
    # communicate() drains the pipe while waiting.  The original
    # wait() + stdout.read() could deadlock once the container's output
    # filled the pipe buffer.
    output, _ = ps.communicate()
    stdout_path = os.path.join(run_path, "stdout")
    with open(stdout_path, "w") as f:
        f.write(output.decode())
    return ps.returncode == 0
def execute(self):
    """Run the task inside its container and record the final status.

    Creates the arguments file and container, starts the container, and
    updates the run's ``status.json``: "running" while active, then
    "done" or "failed" (errors are appended via ``append_error``).
    Exceptions from the setup/start phase are re-raised after marking
    the run as failed.
    """
    run_path = os.path.join(self.path, self.machine_storage())
    status_file = metadata.ConfigFile(os.path.join(run_path, "status.json"))
    status_file.write_variable("status", "running")
    try:
        self.create_arguments_file()
        self.create_container()
        self.copy_arguments_file()
        succeeded = self.start()
    except Exception as e:
        status_file.write_variable("status", "failed")
        self.append_error(str(e))
        raise e
    if succeeded:
        status_file.write_variable("status", "done")
    else:
        status_file.write_variable("status", "failed")
        self.append_error("Run error")
def status(self):
    """Aggregate status over all ``run.*``/``raw.*`` directories.

    Precedence: "locked" first; then the first run reporting "done" or
    "failed"; then "raw" for raw objects; "running" when any run is in
    flight; otherwise "submitted".
    """
    entries = csys.list_dir(self.path)
    if self.is_locked():
        return "locked"
    any_running = False
    for entry in entries:
        if not entry.startswith(("run.", "raw.")):
            continue
        run_status = metadata.ConfigFile(
            os.path.join(self.path, entry, "status.json")
        ).read_variable("status")
        if run_status in ("done", "failed"):
            return run_status
        if run_status == "running":
            any_running = True
    if self.is_raw():
        return "raw"
    return "running" if any_running else "submitted"
def status(self):
    """Return the status of the first ``run.*`` directory that has
    progressed beyond "submitted".

    Falls back to "locked" when this object is locked, else "submitted".

    Note: the original ended with a second, unreachable read of
    ``self.config_file`` after ``return "submitted"``; that dead code
    has been removed with no behavior change.
    """
    for entry in csys.list_dir(self.path):
        if entry.startswith("run."):
            config_file = metadata.ConfigFile(
                os.path.join(self.path, entry, "status.json"))
            status = config_file.read_variable("status", "submitted")
            print("status is ", status, file=sys.stderr)
            if status != "submitted":
                return status
    if self.is_locked():
        return "locked"
    return "submitted"
def create_container(self, container_type="task"):
    """Create the docker container for this task.

    Mounts this run's ``output`` directory read-write under
    ``/data/<impression>`` and every input's finished ``output``
    directory read-only, then records the new container id in the run's
    ``status.json``.
    """
    mounts = "-v {1}:/data/{0}".format(
        self.impression(),
        os.path.join(self.path, self.machine_storage(), "output"))
    for dependency in self.inputs():
        # Inputs are mounted read-only so a task cannot corrupt them.
        mounts += " -v {1}:/data/{0}:ro".format(
            dependency.impression(),
            os.path.join(dependency.path, dependency.storage(), "output"))
    image_id = self.image().image_id()
    command = "docker create {0} {1}".format(mounts, image_id)
    ps = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    print(command, file=sys.stderr)
    ps.wait()
    # docker prints the id of the created container on stdout.
    container_id = ps.stdout.read().decode().strip()
    run_path = os.path.join(self.path, self.machine_storage())
    config_file = metadata.ConfigFile(os.path.join(run_path, "status.json"))
    config_file.write_variable("container_id", container_id)
def machine_storage(self):
    """Return this machine's run-directory name, ``run.<machine_id>``."""
    config_path = os.path.join(os.environ["HOME"],
                               ".ChernMachine/config.json")
    machine_id = metadata.ConfigFile(config_path).read_variable("machine_id")
    return "run." + machine_id
def register():
    """Register this machine: store a fresh machine id and the runner
    type ("docker") in ``~/.ChernMachine/config.json``."""
    config_path = os.path.join(os.environ["HOME"],
                               ".ChernMachine/config.json")
    config = metadata.ConfigFile(config_path)
    config.write_variable("machine_id", uuid.uuid4().hex)
    config.write_variable("runner_type", "docker")
def __init__(self, path):
    """Bind this instance to ``path``.

    The normalized path is the only **information** an instance
    carries; the config handle is derived from it.
    """
    normalized = csys.strip_path_string(path)
    self.path = normalized
    self.config_file = metadata.ConfigFile(normalized + "/config.json")
def container_id(self):
    """Return the container id stored in this machine's run
    ``status.json`` (None when not recorded)."""
    status_path = os.path.join(self.path, self.machine_storage(),
                               "status.json")
    return metadata.ConfigFile(status_path).read_variable("container_id")
def is_locked(self):
    """True when this object's ``status.json`` records "locked"."""
    status_path = os.path.join(self.path, "status.json")
    recorded = metadata.ConfigFile(status_path).read_variable("status")
    return recorded == "locked"
def add_job(self, job_id):
    """Append ``job_id`` to the global job list in ``jobs.json``."""
    jobs_file = metadata.ConfigFile(csys.storage_path() + "/jobs.json")
    jobs = jobs_file.read_variable("jobs_list", [])
    jobs.append(job_id)
    jobs_file.write_variable("jobs_list", jobs)