def create(self, queue_name, cluster_name, policy, charge=None, unit=None):
    """Create a batch queue record and register it in the database.

    :param queue_name: name of the queue to create
    :param cluster_name: slurm cluster on which the queued jobs are going to run
    :param policy: scheduling policy of the queue; must be a key of the
        policy dispatch map (currently 'FIFO' or 'FILO')
    :param charge: charge of the queue
    :param unit: unit of the charge for the queue
    :return: single-element list containing the new queue document, or
        None if a queue with the same uid already exists
    """
    name = Name(order=["cloud", "name"], cloud=cluster_name, name=queue_name)
    uid = name.id(cloud=cluster_name, name=queue_name)
    # Build the document locally first so that a duplicate queue does not
    # clobber any instance state.
    info = Munch({
        'uid': uid,
        "cloud": cluster_name,
        "kind": "batch-queue",
        "name": queue_name,
        "cm": {
            "cloud": cluster_name,
            "kind": "batch-queue",
            "name": queue_name,
            "cluster": self.cm_config.get('cloudmesh').get('cluster')[cluster_name]
        },
        "queue": {
            'policy': policy,
            'status': 'EMPTY',
            'active': False,
            'charge': charge,
            'unit': unit,
            "numJobs": 0,
            "numRunningJobs": 0,
        }
    })
    # Refuse to overwrite an existing queue; bail out before touching self.
    if self.database.exists(info)[0]:
        Console.error("Queue already exists")
        return
    self.info = info
    # dispatch table: policy name -> pop implementation
    self.policyFunctionMap = Munch({
        'FIFO': self.popFIFO,
        'FILO': self.popFILO
    })
    # queue attributes that users are allowed to modify after creation
    self.settable_params = ['policy', 'charge', 'unit']
    return [self.info]
def create(self, queue_name, cluster_name, policy, charge=None, unit=None):
    """Create a batch queue record and register it in the database.

    :param queue_name: name of the queue to create
    :param cluster_name: slurm cluster on which the queued jobs are going to run
    :param policy: scheduling policy of the queue
    :param charge: charge of the queue
    :param unit: unit of the charge for the queue
    :return: single-element list containing the new queue document, or
        None if a queue with the same uid already exists
    """
    name = Name(order=["cloud", "name"], cloud=cluster_name, name=queue_name)
    uid = name.id(cloud=cluster_name, name=queue_name)
    # Build the document locally first so that a duplicate queue does not
    # clobber any instance state.
    # NOTE(review): status here is 'Empty' while the sibling create() uses
    # 'EMPTY' — confirm which spelling consumers expect.
    queue = {
        'uid': uid,
        "cloud": cluster_name,
        "kind": "batch-queue",
        "name": queue_name,
        "cm": {
            "cloud": cluster_name,
            "kind": "batch-queue",
            "name": queue_name,
            "cluster": self.cm_config.get('cloudmesh').get('cluster')[cluster_name]
        },
        "queue": {
            'policy': policy,
            'status': 'Empty',
            'active': False,
            'charge': charge,
            'unit': unit,
            "numJobs": 0,
            "numRunningJobs": 0,
        }
    }
    # Refuse to overwrite an existing queue; bail out before touching self.
    if self.database.exists(queue)[0]:
        Console.error("Queue already exists")
        return
    self.queue = queue
    return [self.queue]
def findQueue(self, cloud_name, queue_name):
    """Look up a queue document in the database by cluster and queue name.

    On success the first matching document is cached (munchified) in
    ``self.info``.

    :param cloud_name: slurm cluster the queue belongs to
    :param queue_name: name of the queue
    :return: True if the queue was found, False if the query returned an
        empty list, None for any other result shape
    """
    name = Name(order=["cloud", "name"], cloud=cloud_name, name=queue_name)
    uid = name.id(cloud=cloud_name, name=queue_name)
    collection = "{cloud}-{kind}".format(cloud=cloud_name, kind='batch-queue')
    queue = self.database.find_by_KeyValue(collection_name=collection,
                                           KeyValue={'uid': uid})
    if isinstance(queue, cursor.Cursor):
        # queue found: cache the first matching document for later use
        self.info = munch.munchify(queue[0])
        return True
    if isinstance(queue, list) and len(queue) == 0:
        # queue not found
        return False
    # Any other result shape falls through to an implicit None
    # (legacy behavior preserved).
class TestMongo:
    """Ordered integration tests for CmDatabase and the Name counter.

    NOTE(review): the tests share database state and a Name counter across
    methods, so they must run in the numbered order.
    """

    def setup(self):
        # Fresh database handle and a Name generator starting at counter 1.
        self.database = CmDatabase()
        self.name = Name(experiment="exp",
                         group="grp",
                         user="******",
                         kind="vm",
                         counter=1)

    def test_00_status(self):
        """Clearing the database leaves it empty."""
        HEADING()
        # print(self.name)
        # print(self.name.counter)
        # print(self.name.id(counter=100))
        self.database.clear()
        r = self.database.find()
        pprint(r)
        assert len(r) == 0

    def test_01_status(self):
        """The server status report is reachable and carries basic fields."""
        HEADING()
        r = self.database.status()
        # pprint(r)
        assert "Connection refused" not in r
        d = {}
        # Extract a few well-known status fields for display.
        for field in ['uptime', 'pid', 'version', 'host']:
            d[field] = r[field]
        print(Printer.attribute(d))
        assert d is not None

    def test_02_update(self):
        """Inserting two entries with incrementing cm ids stores both."""
        HEADING()
        entries = [{"name": "Gregor"}, {"name": "Laszewski"}]
        for entry in entries:
            entry["cmid"] = str(self.name)
            entry["cmcounter"] = self.name.counter
            self.name.incr()
        self.database.update(entries)
        r = self.database.find()
        pprint(r)
        assert len(r) == 2

    def test_03_update(self):
        """A stored entry can be found by name."""
        HEADING()
        r = self.database.find(name="Gregor")
        pprint(r)
        assert r[0]['name'] == "Gregor"

    def test_04_update(self):
        """update(replace=False) re-targets entries by explicit counter."""
        HEADING()
        entries = [{
            "cmcounter": 1,
            "name": "gregor"
        }, {
            "cmcounter": 2,
            "name": "laszewski"
        }]
        pprint(entries)
        for entry in entries:
            counter = entry["cmcounter"]
            print("Counter:", counter)
            # Derive the cmid from the entry's own counter value.
            entry["cmid"] = self.name.id(counter=counter)
        self.database.update(entries, replace=False)
        r = self.database.find()
        pprint(r)

    def test_05_update(self):
        """The lowercase rename from test_04 is visible."""
        HEADING()
        r = self.database.find(name="gregor")
        pprint(r)
        assert r[0]["name"] == "gregor"

    def test_06_find_by_counter(self):
        """Entries are retrievable by their cm counter."""
        HEADING()
        r = self.database.find_by_counter(1)
        pprint(r)
        assert r[0]["name"] == "gregor"
        r = self.database.find_by_counter(2)
        pprint(r)
        assert r[0]["name"] == "laszewski"

    def test_07_decorator_update(self):
        """@DatabaseUpdate stores the dict returned by the wrapped function."""
        HEADING()

        @DatabaseUpdate(collection="cloudmesh")
        def entry():
            name = Name()
            print(name)
            print("OOOO", str(name), name.counter)
            d = {
                "cmid": str(name),
                "cmcounter": name.counter,
                "name": "albert"
            }
            name.incr()
            pprint(d)
            return d

        a = entry()
        # The decorated insert should land at counter 3.
        r = self.database.find_by_counter(3)
        pprint(r)

    def test_08_decorator_add(self):
        """@DatabaseAdd appends the returned dict as a new record."""
        HEADING()

        @DatabaseAdd(collection="cloudmesh")
        def entry():
            d = {"name": "zweistein"}
            return d

        a = entry()
        r = self.database.find()
        pprint(r)
        assert len(r) == 4

    def test_09_overwrite(self):
        """update(replace=True) overwrites an existing record in place."""
        HEADING()
        r = self.database.find(name="gregor")[0]
        pprint(r)
        r["color"] = "red"
        self.database.update([r], replace=True)
        r = self.database.find(color="red")
        pprint(r)
        assert len(r) == 1

    def test_10_fancy(self):
        """Replacing by a reconstructed cmid keeps the record count stable."""
        HEADING()
        counter = 1
        n = Name(experiment="exp",
                 group="grp",
                 user="******",
                 kind="vm",
                 counter=counter)
        print(n)
        entries = [{
            "cmcounter": counter,
            "cmid": str(n),
            "name": "gregor",
            "phone": "android"
        }]
        self.database.update(entries, replace=True)
        r = self.database.find()
        pprint(r)
        assert len(r) == 4
def create(self, job_name, cluster_name, script_path, executable_path,
           destination, source, experiment_name, companion_file):
    """Create a batch job record for running on a remote slurm cluster.

    :param job_name: name of the job to create
    :param cluster_name: slurm cluster on which the job is going to run
    :param script_path: path of the slurm script
    :param executable_path: path of the executable that is going to be run
        on the cluster via the slurm script
    :param destination: remote path to which the scripts are copied and
        from which they are run
    :param source: local path to which the results are copied back
    :param experiment_name: experiment name and suffix of the filenames
        in the job
    :param companion_file: path of the file that has to be passed to the
        executable as an argument, if any
    :return: single-element list containing the new job document, or
        None if a job with the same uid already exists
    """
    name = Name(order=["name", "experiment_name"],
                name=job_name,
                experiment_name=experiment_name)
    uid = name.id(name=job_name, experiment_name=experiment_name)
    # Build the document locally first so that a duplicate job does not
    # clobber any instance state.
    # TODO: remove cloud and kind after fixing CmDatabase update
    job = {
        'uid': uid,
        "cloud": cluster_name,
        "kind": "batch-job",
        "name": job_name,
        "cm": {
            "cloud": cluster_name,
            "kind": "batch-job",
            "name": job_name,
            "cluster": self.cm_config.get('cloudmesh').get('cluster')[cluster_name]
        },
        "batch": {
            "status": "pending",
            'script_path': script_path.as_posix(),
            'executable_path': executable_path.as_posix(),
            'destination': destination.as_posix(),
            'source': source.as_posix(),
            'experiment_name': experiment_name,
            'companion_file': companion_file.as_posix()
        }
    }
    # Refuse to overwrite an existing job; bail out before touching self.
    if self.database.exists(job)[0]:
        Console.error("Job already exists")
        return
    self.job = job
    return [self.job]