Example #1
def _init_processes(self):
    self.processes = []
    self._unique_hosts = get_hosts_set(self.deployment.hosts)
    # group the hosts to deploy by the frontend that manages them
    frontends = dict()
    for host in self._unique_hosts:
        frontend = _get_host_frontend(host)
        if frontend in frontends:
            frontends[frontend].append(host)
        else:
            frontends[frontend] = [host]
    lifecycle_handler = ActionNotificationProcessLH(self, len(frontends))
    deploy_stdout_handler = _KadeployStdoutHandler()
    # spawn one kadeploy process per frontend, listing all of that
    # frontend's hosts with repeated -m options
    for frontend in frontends:
        kadeploy_command = self.deployment._get_common_kadeploy_command_line()
        for host in frontends[frontend]:
            kadeploy_command += " -m %s" % (host.address,)
        p = get_process(kadeploy_command,
                        host=get_frontend_host(frontend),
                        connection_params=make_connection_params(
                            self.frontend_connection_params,
                            default_frontend_connection_params))
        p.pty = True
        p.timeout = self.timeout
        p.stdout_handlers.append(deploy_stdout_handler)
        # wrap user-supplied handlers so their output is prefixed
        # with the frontend name
        p.stdout_handlers.extend([FrontendPrefixWrapper(h)
                                  for h in singleton_to_collection(self._stdout_handlers)])
        p.stderr_handlers.extend([FrontendPrefixWrapper(h)
                                  for h in singleton_to_collection(self._stderr_handlers)])
        p.lifecycle_handlers.append(lifecycle_handler)
        # bookkeeping attributes used later to track per-frontend results
        p.frontend = frontend
        p.kadeploy_hosts = [host.address for host in frontends[frontend]]
        p.deployed_hosts = set()
        p.undeployed_hosts = set()
        p.kadeployer = self
        self.processes.append(p)
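
The method's core pattern is to group the hosts to deploy by their managing frontend, then spawn one kadeploy process per group. Below is a standalone sketch of just that grouping step, with a hypothetical host_frontend helper standing in for _get_host_frontend and made-up Grid'5000-style host names:

from collections import defaultdict

def host_frontend(address):
    # hypothetical stand-in for _get_host_frontend: take the site
    # component of a name like "node-1.rennes.grid5000.fr"
    return address.split(".")[1]

hosts = ["node-1.rennes.grid5000.fr",
         "node-2.rennes.grid5000.fr",
         "node-1.nancy.grid5000.fr"]

# group hosts by frontend, as _init_processes does with a plain dict
frontends = defaultdict(list)
for h in hosts:
    frontends[host_frontend(h)].append(h)

for frontend, group in frontends.items():
    # one command per frontend, with a repeated -m option per host
    cmd = "kadeploy3" + "".join(" -m %s" % (h,) for h in group)
    print(frontend, "->", cmd)

The real method additionally attaches output handlers, a lifecycle handler, and per-frontend bookkeeping attributes to each spawned process, as shown above.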
Example #2
def filter_clusters(clusters, queues="default"):
    """Filter a list of clusters on their queue(s).

    Given a list of clusters, return the filtered list, keeping only
    clusters that have at least one OAR queue matching one of the
    filter queues passed as parameter. The cluster queues are taken
    from the queues attribute in the Grid'5000 API. If this attribute
    is missing, the cluster is considered to be in queues
    ["admin", "default", "besteffort"].

    :param clusters: list of clusters

    :param queues: a queue name or a list of queues. Clusters are
      kept in the returned filtered list only if at least one of their
      queues matches one queue of this parameter. If queues is None or
      False, no filtering at all is done. By default, keeps clusters
      in queue "default".
    """

    if queues is None or queues is False:
        return clusters
    # accept a single queue name as well as a list of queue names
    queues = singleton_to_collection(queues)
    filtered_clusters = []
    for cluster in clusters:
        cluster_queues = get_cluster_attributes(cluster).get("queues")
        if not cluster_queues:
            # queues assumed when the API attribute is missing or empty
            cluster_queues = ["admin", "default", "besteffort"]
        for q in queues:
            if q in cluster_queues:
                filtered_clusters.append(cluster)
                break
    return filtered_clusters
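
A minimal, self-contained sketch of the filtering semantics, meant to be pasted after the function above. The stubs replace the real execo helpers (get_cluster_attributes normally queries the Grid'5000 reference API; singleton_to_collection is only approximated here), and the cluster names and queue assignments are hypothetical:

# hypothetical stand-ins so the sketch runs without Grid'5000 access
_FAKE_API = {
    "clusterA": {"queues": ["default", "besteffort"]},
    "clusterB": {"queues": ["production"]},
    "clusterC": {},  # no queues attribute: falls back to the default list
}

def get_cluster_attributes(cluster):
    return _FAKE_API[cluster]

def singleton_to_collection(arg):
    # approximation of execo's helper: wrap a single value in a list
    if isinstance(arg, (list, tuple, set, frozenset)):
        return arg
    return [arg]

print(filter_clusters(["clusterA", "clusterB", "clusterC"]))
# -> ['clusterA', 'clusterC']  (both serve the "default" queue)
print(filter_clusters(["clusterA", "clusterB", "clusterC"], queues="production"))
# -> ['clusterB']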
Example #3
File: oar.py Project: msimonin/execo
def get_oarsub_commandline(job_spec):
    oarsub_cmdline = 'oarsub'
    if job_spec.additional_options is not None:
        oarsub_cmdline += ' %s' % (job_spec.additional_options,)
    # resource requests are joined with '+'; the walltime is appended
    # to the same -l option when both are given
    if job_spec.resources:
        oarsub_cmdline += ' -l "' + '+'.join(singleton_to_collection(job_spec.resources))
        if job_spec.walltime is not None:
            oarsub_cmdline += ',walltime=%s' % (format_oar_duration(job_spec.walltime),)
        oarsub_cmdline += '"'
    elif job_spec.walltime is not None:
        oarsub_cmdline += ' -l "walltime=%s"' % (format_oar_duration(job_spec.walltime),)
    # the job key file can come from the configuration or the environment
    key = g5k_configuration.get('oar_job_key_file')
    if key is None:
        key = os.environ.get('OAR_JOB_KEY_FILE')
    if key is not None:
        oarsub_cmdline += ' -k -i %s' % (key,)
    if job_spec.job_type is not None:
        for t in singleton_to_collection(job_spec.job_type):
            oarsub_cmdline += ' -t "%s"' % (t,)
    if job_spec.sql_properties is not None:
        oarsub_cmdline += ' -p "%s"' % (job_spec.sql_properties,)
    if job_spec.queue is not None:
        oarsub_cmdline += ' -q "%s"' % (job_spec.queue,)
    if job_spec.reservation_date is not None:
        oarsub_cmdline += ' -r "%s"' % (format_oar_date(job_spec.reservation_date),)
    if job_spec.directory is not None:
        oarsub_cmdline += ' -d "%s"' % (job_spec.directory,)
    if job_spec.project is not None:
        oarsub_cmdline += ' --project "%s"' % (job_spec.project,)
    if job_spec.name is not None:
        oarsub_cmdline += ' -n "%s"' % (job_spec.name,)
    if job_spec.command is not None:
        oarsub_cmdline += ' "%s"' % (job_spec.command,)
    else:
        # no command given: keep the job alive for up to a year
        oarsub_cmdline += ' "sleep 31536000"'
    return oarsub_cmdline
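
A usage sketch: get_oarsub_commandline only reads attributes off job_spec, so a types.SimpleNamespace with the expected fields is enough to exercise it. SimpleNamespace is a stand-in here, not execo's real job specification class, and the sketch assumes it runs inside the module above (the function needs g5k_configuration, format_oar_duration, and the other module-level helpers):

from types import SimpleNamespace

# hypothetical job spec; every attribute the function reads must exist
spec = SimpleNamespace(
    additional_options=None,
    resources="nodes=4",
    walltime=7200,          # assumed acceptable to format_oar_duration
    job_type="deploy",
    sql_properties=None,
    queue=None,
    reservation_date=None,
    directory=None,
    project=None,
    name="my-experiment",
    command=None,           # None falls back to the year-long sleep
)

print(get_oarsub_commandline(spec))
# Expected shape (exact walltime formatting depends on format_oar_duration,
# and '-k -i <key>' appears only if a job key file is configured):
# oarsub -l "nodes=4,walltime=2:0:0" -t "deploy" -n "my-experiment" "sleep 31536000"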