Example #1
def _get_api():
    """Retrieve data from the Grid'5000 API"""
    logger.info('Retrieving data from API...')

    backbone_th = threading.Thread(target = __get_backbone)
    backbone_th.start()

    site_th = {}
    for site in sorted(_get_g5k_sites_uncached()):
        t = threading.Thread(target = __get_site, args = (site,))
        t.start()
        site_th[site] = t

    for t in [ backbone_th ] + list(site_th.values()):
        t.join()

    data = {'network': {},
            'sites': {},
            'clusters': {},
            'hosts':  {},
            'hierarchy': {}}
    data['network']['backbone'] = backbone_th.backbone_data
    for site in site_th:
        data['network'][site] = site_th[site].network_data
        data['sites'][site] = site_th[site].site_data
        data['hierarchy'][site] = {}
        for cluster in site_th[site].cluster_data:
            data['clusters'][cluster] = site_th[site].cluster_data[cluster]
            data['hierarchy'][site][cluster] = []
            for host in site_th[site].host_data[cluster]:
                data['hosts'][host] = site_th[site].host_data[cluster][host]
                data['hierarchy'][site][cluster].append(host)

    return data
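The worker functions __get_backbone and __get_site are not part of this excerpt; the code above assumes each of them attaches its result to its own Thread object, which the caller reads after join(). A minimal sketch of that convention, with hypothetical fetch_* helpers standing in for the real API requests:

import threading

def __get_backbone():
    # Store the result on the current thread so the caller can read
    # backbone_th.backbone_data after join().
    th = threading.current_thread()
    th.backbone_data = fetch_backbone()          # hypothetical helper

def __get_site(site):
    # Same convention: one attribute per piece of data, read back as
    # site_th[site].network_data, .site_data, .cluster_data, .host_data.
    th = threading.current_thread()
    th.network_data = fetch_network(site)        # hypothetical helper
    th.site_data = fetch_site(site)              # hypothetical helper
    th.cluster_data = fetch_clusters(site)       # hypothetical helper
    th.host_data = fetch_hosts(site)             # hypothetical helper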
Example #2
    def handle_starttag(self, tag, attrs):
        #logger.debug("Encountered a start tag: %s",tag)
        if "input" in tag and self.current_counter > 0 and self.nb_results <= self.max_results:
            logger.debug("Encountered a start tag: %s", tag)
            # ww8 / ww9 can be used to detect the downloading URL
            if ("ww8" in attrs[2][1]
                    or "ww9" in attrs[2][1]) and "url" in attrs[1][1]:
                url = attrs[2][1]
                logger.debug("URL of the song")
                logger.debug(url)
                self.current_counter -= 1

                logger.debug("Protecting the address with double quotes")
                url = '"' + url + '"'

                try:
                    os.mkdir(self.destination_path)
                except OSError:
                    logger.info("The folder %s already exists",
                                self.destination_path)

                os.system("wget " + url + " -O " + self.destination_path +
                          "/" + "XY")
                if 'keep_original' in self.naming:  # We make a choice between the original name of the file or the search terms
                    os.rename(
                        self.destination_path + "/" + "XY",
                        self.destination_path + "/" + self.artist.title() +
                        " - " + self.title.title() + ".mp3")
                else:
                    os.rename(
                        self.destination_path + "/" + "XY",
                        self.destination_path + "/" + self.data.title() +
                        ".mp3")

                return
Example #3
File: api_utils.py Project: mickours/execo
def set_nodes_vlan(site, hosts, interface, vlan_id):
    """Set the interface of a list of hosts in a given vlan

    :param site: Site name

    :param hosts: List of hosts

    :param interface: The interface to put in the vlan

    :param vlan_id: Id of the vlan to use
    """

    def _to_network_address(host):
        """Translate a host to a network address

        e.g:
        paranoia-20.rennes.grid5000.fr -> paranoia-20-eth2.rennes.grid5000.fr
        """
        splitted = host.address.split('.')
        splitted[0] = splitted[0] + "-" + interface
        return ".".join(splitted)

    network_addresses = map(_to_network_address, hosts)
    logger.info("Setting %s in vlan %s of site %s" % (network_addresses, vlan_id, site))
    return _get_g5k_api().post('/sites/%s/vlans/%s' % (site, str(vlan_id)), {"nodes": network_addresses})
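A minimal usage sketch, assuming the function is importable from execo_g5k.api_utils as in the excerpt above; the host names, interface and vlan id are made up for illustration:

from execo import Host
from execo_g5k.api_utils import set_nodes_vlan

hosts = [Host('paranoia-20.rennes.grid5000.fr'),
         Host('paranoia-21.rennes.grid5000.fr')]
# Moves paranoia-20-eth2 and paranoia-21-eth2 into kavlan 4 through the REST API.
set_nodes_vlan('rennes', hosts, 'eth2', 4)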
Example #4
def set_nodes_vlan(site, hosts, interface, vlan_id):
    """Set the interface of a list of hosts in a given vlan

    :param site: Site name

    :param hosts: List of hosts

    :param interface: The interface to put in the vlan

    :param vlan_id: Id of the vlan to use
    """
    def _to_network_address(host):
        """Translate a host to a network address

        e.g:
        paranoia-20.rennes.grid5000.fr -> paranoia-20-eth2.rennes.grid5000.fr
        """
        splitted = host.address.split('.')
        splitted[0] = splitted[0] + "-" + interface
        return ".".join(splitted)

    network_addresses = map(_to_network_address, hosts)
    logger.info("Setting %s in vlan %s of site %s" %
                (network_addresses, vlan_id, site))
    return _get_g5k_api().post('/sites/%s/vlans/%s' % (site, str(vlan_id)),
                               {"nodes": network_addresses})
Example #5
def main():
    args = parser.parse_args()
 
    # Retrieve the right number of lines
    try:
        nodesFile = open(args.nodes_address_file)
        nodesInfos = [line for line in nodesFile]
    except IOError as e:
        print "I/O error({0}) on {1}: {2}".format(e.errno, args.nodes_address_file, e.strerror)
        sys.exit()
    
    hosts  = [s.strip().split(':')[0] for s in nodesInfos]
    hosts.append(args.service_node)
    frontends = list(set([str('frontend.'+get_host_site(h)) for h in hosts]))
    

    ## Remove the old DHT-EXP hierarchy 
    logger.info('Remove old files on each NFS server involved in the experiment ('+str(frontends)+')')
    whoami=os.getlogin()
    cmd = 'rm -rf ~/SLOTH-EXP-TMP' 
    TaktukRemote(cmd, frontends, connection_params={'user': str(whoami)}).run()


    ## Copy the DHT-EXP hierarchy to the remote site
    logger.info('Copy sloth and injector files on each NFS server involved in the experiment ('+str(frontends)+')')
    TaktukRemote('mkdir ~/SLOTH-EXP-TMP/', frontends, connection_params={'user': str(whoami)}).run()
    TaktukPut(frontends, ['SLOTH_HOME'],'/home/'+str(os.getlogin())+'/SLOTH-EXP-TMP/.', connection_params={'user': str(whoami)}).run()
    TaktukPut(frontends, ['INJECTOR_HOME'],'/home/'+str(os.getlogin())+'/SLOTH-EXP-TMP/.', connection_params={'user': str(whoami)}).run()
    TaktukPut(frontends,  ['peers.info'], '/tmp/'+str(os.getlogin())+'-peers.info', connection_params={'user': str(whoami)}).run()


    test = TaktukPut(frontends, ['' ], connection_params={'user': str(whoami)}).run()
Example #6
    def _start_disk_copy(self, disks=None):
        """Copy the backing files to the hosts, skipping hosts whose copy
        already matches the frontend md5sum."""
        disks_copy = []
        if not disks:
            disks = self.backing_files
        for bf in disks:
            logger.info('Treating ' + style.emph(bf))
            logger.debug("Checking frontend disk vs host disk")
            raw_disk = '/tmp/orig_' + bf.split('/')[-1]
            f_disk = Process('md5sum -b ' + bf).run()
            disk_hash = f_disk.stdout.split(' ')[0]
            cmd = 'if [ -f ' + raw_disk + ' ]; ' + \
                'then md5sum  -b ' + raw_disk + '; fi'
            h_disk = self.fact.get_remote(cmd, self.hosts).run()
            disk_ok = True
            for p in h_disk.processes:

                if p.stdout.split(' ')[0] != disk_hash:
                    disk_ok = False
                    break
            if disk_ok:
                logger.info("Disk " + style.emph(bf) +
                            " is already present, skipping copy")
            else:
                disks_copy.append(
                    self.fact.get_fileput(self.hosts, [bf],
                                          remote_location="/tmp"))
        if len(disks_copy) > 0:
            self.copy_actions = ParallelActions(disks_copy).start()
        else:
            self.copy_actions = Remote('ls', self.hosts[0]).run()
Example #7
 def start_servers(self):
     servers = [host for host in self.servers]
     logger.info("Initialize the SeD")
     
     logger.debug("Compile the executables")
     cmd = "cd "+sched_dir+"; make clean && make"
     a = Remote(cmd, servers, connection_params = root_connection_params).run()
     for s in a.processes:
         pout = s.stdout
     logger.debug(pout)
     
     site = self.site
     cmd = "sed -i 's/LA_"+site+"/MA1/g' /root/dietg/cfgs/server.cfg;"
     a = Remote(cmd, servers, connection_params = root_connection_params).run()
     
     cmd = "cd /root/dietg/; ./set_sed.sh"
     a = Remote(cmd, servers, connection_params = root_connection_params).run()
     
     cmd = "if [ -e /root/dietg/log/total.jobs ]; then rm /root/dietg/log/total.jobs; fi"
     a = Remote(cmd, servers, connection_params = root_connection_params).run()
     
     cmd = "if [ -e /root/dietg/log/current.jobs ]; then rm /root/dietg/log/current.jobs; fi"
     a = Remote(cmd, servers, connection_params = root_connection_params).run()
     
     logger.info("Done!")
Example #8
File: deployment.py Project: badock/vm5k
    def _create_backing_file(self, disks=None, backing_file_dir='/tmp'):
        """Create the backing disk files on the hosts and copy the root
        ssh key into them."""
        if not self.copy_actions:
            self._start_disk_copy(disks)
        if not self.copy_actions.ended:
            logger.info("Waiting for the end of the disks copy")
            self.copy_actions.wait()
        if isinstance(self.copy_actions, ParallelActions):
            mv_actions = []
            for act in self.copy_actions.actions:
                fname = act.local_files[0].split('/')[-1]
                mv_actions.append(self.fact.get_remote("mv %s/" % backing_file_dir + fname +
                                                       " %s/orig_" % backing_file_dir + fname,
                                                       self.hosts))

            mv = ParallelActions(mv_actions).run()

        if not disks:
            disks = self.backing_files
        for bf in disks:
            raw_disk = '%s/orig_' % backing_file_dir + bf.split('/')[-1]
            to_disk = '%s/' % backing_file_dir + bf.split('/')[-1]
            self.fact.get_remote('cp ' + raw_disk + ' ' + to_disk, self.hosts).run()
            logger.info('Copying ssh key on ' + to_disk + ' ...')
            cmd = 'modprobe nbd max_part=16; ' + \
                'qemu-nbd --connect=/dev/nbd0 ' + to_disk + \
                ' ; sleep 3 ; partprobe /dev/nbd0 ; ' + \
                'part=`fdisk -l /dev/nbd0 |grep dev|grep Linux| grep -v swap|cut -f 1 -d " "` ; ' + \
                'mount $part /mnt ; mkdir -p /mnt/root/.ssh ; ' + \
                'cat /root/.ssh/authorized_keys >> /mnt/root/.ssh/authorized_keys ; ' + \
                'cp -r /root/.ssh/id_rsa* /mnt/root/.ssh/ ;' + \
                'umount /mnt; qemu-nbd -d /dev/nbd0'
            logger.detail(cmd)
            copy_on_vm_base = self.fact.get_remote(cmd, self.hosts).run()
            self._actions_hosts(copy_on_vm_base)
Example #9
File: deployment.py Project: badock/vm5k
    def _start_disk_copy(self, disks=None, backing_file_dir='/tmp'):
        """Copy the backing files to the hosts, skipping hosts whose copy
        already matches the frontend md5sum."""
        disks_copy = []
        if not disks:
            disks = self.backing_files
        for bf in disks:
            logger.info('Treating ' + style.emph(bf))
            logger.debug("Checking frontend disk vs host disk")
            raw_disk = '%s/orig_' % backing_file_dir + bf.split('/')[-1]
            f_disk = Process('md5sum -b ' + bf).run()
            disk_hash = f_disk.stdout.split(' ')[0]
            cmd = 'if [ -f ' + raw_disk + ' ]; ' + \
                'then md5sum  -b ' + raw_disk + '; fi'
            h_disk = self.fact.get_remote(cmd, self.hosts).run()
            disk_ok = True
            for p in h_disk.processes:

                if p.stdout.split(' ')[0] != disk_hash:
                    disk_ok = False
                    break
            if disk_ok:
                logger.info("Disk " + style.emph(bf) +
                            " is already present, skipping copy")
            else:
                disks_copy.append(self.fact.get_fileput(self.hosts, [bf],
                                                        remote_location="%s" % backing_file_dir))
        if len(disks_copy) > 0:
            self.copy_actions = ParallelActions(disks_copy).start()
        else:
            self.copy_actions = Remote('ls', self.hosts[0]).run()
Example #10
File: deployment.py Project: lpouillo/vm5k
    def __init__(self, infile=None, resources=None, hosts=None,
                 ip_mac=None, vlan=None,
                 env_name=None, env_file=None, vms=None,
                 distribution=None, outdir=None):
        """:param infile: an XML file that describes the topology of the
        deployment

        :param resources: a dict whose keys are Grid'5000 sites and values are
        dicts with keys hosts and ip_mac, where hosts is a list of
        execo.Host and ip_mac is a list of (ip, mac) tuples.

        :param env_name: name of the Kadeploy environment

        :param env_file: path to the Kadeploy environment file

        :param vms: dict defining the virtual machines

        :param distribution: how to distribute the vms on the hosts
        (``round-robin``, ``concentrated``, ``random``)

        :param outdir: directory to store the deployment files
        """
        # set a factory for the deployment that use taktuk and chainput
        self.fact = ActionFactory(remote_tool=TAKTUK,
                                  fileput_tool=CHAINPUT,
                                  fileget_tool=TAKTUK)
        self.kavlan = None if not vlan else vlan
        self.kavlan_site = None
        if env_name is not None:
            self.env_file = None
            if ':' not in env_name:
                self.env_name, self.env_user = env_name, None
            else:
                self.env_user, self.env_name = env_name.split(':')
        else:
            if env_file is not None:
                self.env_name = None
                self.env_user = None
                self.env_file = env_file
            else:
                self.env_name = 'vm5k'
                self.env_user = '******'
                self.env_file = None

        if outdir:
            self.outdir = outdir
        else:
            self.outdir = 'vm5k_' + strftime("%Y%m%d_%H%M%S_%z")

        self.copy_actions = None

        self.state = Element('vm5k')
        self._define_elements(infile, resources, hosts, vms, ip_mac,
                              distribution)

        logger.info('%s %s %s %s %s %s %s %s',
                    len(self.sites), style.emph('sites'),
                    len(self.clusters), style.user1('clusters'),
                    len(self.hosts), style.host('hosts'),
                    len(self.vms), style.vm('vms'))
Example #11
def _munin_server(server, clients):
    """Install the monitoring service munin. Must be executed inside Grid'5000
    to be able to resolve the server and clients IP.

    :param server: a execo.Host

    :param clients: a list of execo.Hosts

    :param plugins: a list of munin plugins

    """
    logger.info(
        'Munin monitoring service installation, server = %s, clients = \n %s',
        server.address, [host.address for host in clients])

    logger.debug('Configuring munin server %s', style.host('server'))
    cmd = 'export DEBIAN_MASTER=noninteractive ; apt-get update && apt-get install -y munin'
    inst_munin_server = SshProcess(cmd, server).run()

    logger.debug('Creating configuration files for server')
    fd, server_conf = mkstemp(dir='/tmp/', prefix='munin-nodes_')
    f = fdopen(fd, 'w')
    for host in clients:
        get_ip = Process('host ' + host.address).run()
        ip = get_ip.stdout.strip().split(' ')[3]
        f.write('[' + host.address + ']\n    address ' + ip +
                '\n   use_node_name yes\n\n')
    f.close()

    Put([server], [server_conf], remote_location='/etc/').run()
    SshProcess('cd /etc && cp ' + server_conf.split('/')[-1] + ' munin.conf',
               server).run()
    Process('rm ' + server_conf).run()
Example #12
File: munin.py Project: badock/vm5k
def _munin_server(server, clients):
    """Install the monitoring service munin. Must be executed inside Grid'5000
    to be able to resolve the server and clients IP.

    :param server: a execo.Host

    :param clients: a list of execo.Hosts

    :param plugins: a list of munin plugins

    """
    logger.info('Munin monitoring service installation, server = %s, clients = \n %s',
                server.address, [host.address for host in clients])

    logger.debug('Configuring munin server %s', style.host('server'))
    cmd = 'export DEBIAN_MASTER=noninteractive ; apt-get update && apt-get install -y munin'
    inst_munin_server = SshProcess(cmd, server).run()

    logger.debug('Creating configuration files for server')
    fd, server_conf = mkstemp(dir='/tmp/', prefix='munin-nodes_')
    f = fdopen(fd, 'w')
    for host in clients:
        get_ip = Process('host '+host.address).run()
        ip = get_ip.stdout.strip().split(' ')[3]
        f.write('['+host.address+']\n    address '+ip+'\n   use_node_name yes\n\n')
    f.close()

    Put([server], [server_conf], remote_location='/etc/').run()
    SshProcess('cd /etc && cp '+server_conf.split('/')[-1]+' munin.conf', server).run()
    Process('rm '+server_conf).run()
Example #13
File: actions.py Project: badock/vm5k
def destroy_vms(hosts, undefine=False):
    """Destroy all the VMs on the hosts"""
    logger.info('Destroying vms from %s' % hosts)
    # retry destroying
    all_destroyed = False
    attempt = 0
    while not all_destroyed:
        attempt += 1
        logger.info('Destroying vms: attempt #%s' % attempt)
        cmds = []
        hosts_with_vms = []
        hosts_vms = list_vm(hosts, not_running=True)

        for host, vms in hosts_vms.iteritems():
            logger.info('Destroying %s vms from host %s' % (len(vms), host))
            if len(vms) > 0:
                if not undefine:
                    cmds.append('; '.join('virsh destroy ' + vm['id']
                                          for vm in vms))
                else:
                    cmds.append('; '.join('virsh destroy ' + vm['id'] + '; '
                                          'virsh undefine ' + vm['id']
                                          for vm in vms))
                hosts_with_vms.append(host)
        if len(cmds) > 0:
            TaktukRemote('{{cmds}}', hosts_with_vms).run()
        if len(hosts_with_vms) == 0:
            all_destroyed = True
            logger.info('Destroying finished')
        if attempt == 30:
            logger.info('Destroying so many times, unsuccessfully')
            break
Example #14
 def _remove_existing_disks(self, hosts=None):
     """Remove all img and qcow2 file from /tmp directory """
     logger.info('Removing existing disks')
     if hosts is None:
         hosts = self.hosts
     remove = self.fact.get_remote('rm -f /tmp/*.img; rm -f /tmp/*.qcow2',
                                   hosts).run()
     self._actions_hosts(remove)
Example #15
def set_cpu_load(load, vm_ip, pid):
    """Use cpulimit to change process intensity on vm"""
    logger.info('kill cpu_limit on %s and set it to %s', vm_ip, load)
    kill_cpu_limit = SshProcess('ps aux| grep "cpulimit" | grep -v "grep" | awk \'{print $2}\' | xargs -r kill -9',
                                vm_ip).run()
    start_cpu_limit = SshProcess('cpulimit -p ' + str(pid) + ' -l ' + str(load), vm_ip)
    start_cpu_limit.nolog_exit_code = start_cpu_limit.ignore_exit_code = True
    start_cpu_limit.start()
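A hypothetical call, assuming the pid of the target process inside the VM is already known (for instance from an earlier pgrep over ssh); the IP and pid are made up:

# Limit process 4242 on the VM at 10.158.0.10 to 50% of one core.
set_cpu_load(50, '10.158.0.10', 4242)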
Example #16
File: deployment.py Project: badock/vm5k
 def _remove_existing_disks(self, hosts=None):
     """Remove all img and qcow2 file from /tmp directory """
     logger.info('Removing existing disks')
     if hosts is None:
         hosts = self.hosts
     remove = self.fact.get_remote('rm -f /tmp/*.img; rm -f /tmp/*.qcow2',
                                   hosts).run()
     self._actions_hosts(remove)
Example #17
File: actions.py Project: lpouillo/vm5k
def show_vms(vms):
    """Print a short resume of vms parameters.

    :param vms: a list of dicts, one per virtual machine """
    logger.info(style.log_header('Virtual machines \n') + '%s',
                ', '.join([style.VM(vm['id']) + ' (' + str(vm['mem']) +
                           'Mb, ' + str(vm['n_cpu']) + ' cpu ' +
                           vm['cpuset'] + ', ' + str(vm['hdd']) + 'Gb)'
                           for vm in vms]))
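The keys expected in each vm dict follow directly from the expression above: 'id', 'mem', 'n_cpu', 'cpuset' and 'hdd'. A small sketch with made-up values, assuming show_vms from the vm5k actions module above is importable:

vms = [{'id': 'vm-1', 'mem': 1024, 'n_cpu': 1, 'cpuset': 'auto', 'hdd': 10},
       {'id': 'vm-2', 'mem': 2048, 'n_cpu': 2, 'cpuset': 'auto', 'hdd': 10}]
show_vms(vms)
# Logs something like:
# Virtual machines
# vm-1 (1024Mb, 1 cpu auto, 10Gb), vm-2 (2048Mb, 2 cpu auto, 10Gb)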
Example #18
 def clean_archi(self):
     """ Delete all files related to an existing DIET archi """
     logger.info("Clean DIET architecture")
     process = Process("./dietg/clean_archi_diet.sh")
     process.run()
     process = Process("./dietg/clean.sh")
     process.run()
     process = Process("if [ -e ./tmp ]; then rm ./tmp; fi")
     process.run()
Example #19
File: actions.py Project: lpouillo/vm5k
def restart_vms(vms):
    """Start the VMs that are not running on their host."""
    hosts = [vm['host'] for vm in vms]
    running_vms = list_vm(hosts)
    for vm in vms:
        if {'id': vm['id']} not in running_vms[vm['host']]:
            logger.info('%s has not been started on %s, starting it',
                        style.vm(vm['id']), style.host(vm['host']))
            SshProcess('virsh start ' + vm['id'], vm['host']).run()
Example #20
File: actions.py Project: sphilippot/vm5k
def restart_vms(vms):
    """Start the VMs that are not running on their host."""
    hosts = [vm['host'] for vm in vms]
    running_vms = list_vm(hosts)
    for vm in vms:
        if {'id': vm['id']} not in running_vms[vm['host']]:
            logger.info('%s has not been started on %s, starting it',
                        style.vm(vm['id']), style.host(vm['host']))
            SshProcess('virsh start ' + vm['id'], vm['host']).run()
Example #21
 def configure_libvirt(self, bridge='br0', libvirt_conf=None):
     """Enable a bridge if needed on the remote hosts, configure libvirt
     with a bridged network for the virtual machines, and restart service.
     """
     self._enable_bridge()
     self._libvirt_check_service()
     self._libvirt_uniquify()
     self._libvirt_bridged_network(bridge)
     logger.info('Restarting %s', style.emph('libvirt'))
     self.fact.get_remote('service libvirtd restart', self.hosts).run()
Example #22
File: actions.py Project: sphilippot/vm5k
def show_vms(vms):
    """Print a short resume of vms parameters.

    :param vms: a list of dicts, one per virtual machine """
    logger.info(
        style.log_header('Virtual machines \n') + '%s', ', '.join([
            style.VM(vm['id']) + ' (' + str(vm['mem']) + 'Mb, ' +
            str(vm['n_cpu']) + ' cpu ' + vm['cpuset'] + ', ' + str(vm['hdd']) +
            'Gb)' for vm in vms
        ]))
Example #23
File: deployment.py Project: lpouillo/vm5k
 def configure_libvirt(self, bridge='br0', libvirt_conf=None):
     """Enable a bridge if needed on the remote hosts, configure libvirt
     with a bridged network for the virtual machines, and restart service.
     """
     self._enable_bridge()
     self._libvirt_check_service()
     self._libvirt_uniquify()
     self._libvirt_bridged_network(bridge)
     logger.info('Restarting %s', style.emph('libvirt'))
     self.fact.get_remote('service libvirtd restart', self.hosts).run()
Example #24
def set_cpu_load(load, vm_ip, pid):
    """Use cpulimit to change process intensity on vm"""
    logger.info('kill cpu_limit on %s and set it to %s', vm_ip, load)
    kill_cpu_limit = SshProcess(
        'ps aux| grep "cpulimit" | grep -v "grep" | awk \'{print $2}\' | xargs -r kill -9',
        vm_ip).run()
    start_cpu_limit = SshProcess(
        'cpulimit -p ' + str(pid) + ' -l ' + str(load), vm_ip)
    start_cpu_limit.nolog_exit_code = start_cpu_limit.ignore_exit_code = True
    start_cpu_limit.start()
Example #25
File: deployment.py Project: badock/vm5k
 def _upgrade_hosts(self):
     """Dist upgrade performed on all hosts"""
     logger.info('Upgrading packages')
     cmd = "echo 'debconf debconf/frontend select noninteractive' | debconf-set-selections ; " + \
           "echo 'debconf debconf/priority select critical' | debconf-set-selections ;      " + \
           "export DEBIAN_MASTER=noninteractive ; apt-get update ; " + \
           "apt-get dist-upgrade -y --force-yes -o Dpkg::Options::='--force-confdef' " + \
           "-o Dpkg::Options::='--force-confold' "
     upgrade = self.fact.get_remote(cmd, self.hosts).run()
     self._actions_hosts(upgrade)
Example #26
 def _upgrade_hosts(self):
     """Dist upgrade performed on all hosts"""
     logger.info('Upgrading packages')
     cmd = "echo 'debconf debconf/frontend select noninteractive' | debconf-set-selections ; " + \
           "echo 'debconf debconf/priority select critical' | debconf-set-selections ;      " + \
           "export DEBIAN_MASTER=noninteractive ; apt-get update ; " + \
           "apt-get dist-upgrade -y --force-yes -o Dpkg::Options::='--force-confdef' " + \
           "-o Dpkg::Options::='--force-confold' "
     upgrade = self.fact.get_remote(cmd, self.hosts).run()
     self._actions_hosts(upgrade)
Example #27
    def _other_packages(self, other_packages=None):
        """Installation of packages"""
        other_packages = other_packages.replace(',', ' ')
        logger.info('Installing extra packages \n%s',
                    style.emph(other_packages))

        cmd = 'export DEBIAN_MASTER=noninteractive ; ' + \
            'apt-get update && apt-get install -y --force-yes ' + \
            other_packages
        install_extra = self.fact.get_remote(cmd, self.hosts).run()
        self._actions_hosts(install_extra)
Example #28
File: deployment.py Project: badock/vm5k
    def _other_packages(self, other_packages=None):
        """Installation of packages"""
        other_packages = other_packages.replace(',', ' ')
        logger.info('Installing extra packages \n%s',
                    style.emph(other_packages))

        cmd = 'export DEBIAN_MASTER=noninteractive ; ' + \
            'apt-get update && apt-get install -y --force-yes ' + \
            other_packages
        install_extra = self.fact.get_remote(cmd, self.hosts).run()
        self._actions_hosts(install_extra)
Example #29
File: funk.py Project: mimbert/Funk
def show_resources(resources):
    total_hosts = 0
    log = set_style('Resources', 'log_header')
    for site in get_g5k_sites():
        if site in resources.keys():
            total_hosts += resources[site]
            log += '\n'+set_style(site, 'log_header').ljust(20)+' '+str(resources[site])+'\n'
            for cluster in get_site_clusters(site):
                if cluster in resources.keys():
                    log += set_style(cluster, 'emph')+': '+str(resources[cluster])+'  '
    logger.info(log)
    logger.info(set_style('total hosts: ', 'log_header') + str(total_hosts))
Example #30
    def get_state(self, name=None, output=True, mode='compact', plot=False):
        """Write the deployment state to an XML file and log a compact
        summary."""
        if not name:
            name = 'vm5k_' + strftime('%Y%m%d_%H%M%S', localtime())
        if output:
            output = self.outdir + '/' + name + '.xml'
            f = open(output, 'w')
            f.write(prettify(self.state))
            f.close()

        if mode == 'compact':
            log = self._print_state_compact()

        logger.info('State %s', log)
Example #31
File: deployment.py Project: badock/vm5k
    def get_state(self, name=None, output=True, mode='compact', plot=False):
        """Write the deployment state to an XML file and log a compact
        summary."""
        if not name:
            name = 'vm5k_' + strftime('%Y%m%d_%H%M%S', localtime())
        if output:
            output = self.outdir + '/' + name + '.xml'
            f = open(output, 'w')
            f.write(prettify(self.state))
            f.close()

        if mode == 'compact':
            log = self._print_state_compact()

        logger.info('State %s', log)
Example #32
File: actions.py Project: sphilippot/vm5k
def activate_vms(vms, dest='lyon.grid5000.fr'):
    """Connect locally on every host and on all VMS to ping a host
    and update ARP tables"""
    logger.info('Executing ping from virtual machines on hosts')
    cmd = "VMS=`virsh list | grep -v State | grep -v -e '----' | awk '{print $2}'`; " + \
        "for VM in $VMS; do " + \
        " ssh $VM \"ping -c 3 " + dest + " \"; " + \
        "done"
    logger.debug('Launching ping probes to update ARP tables with %s', cmd)
    activate = TaktukRemote(cmd, list(set([vm['host'] for vm in vms])))
    for p in activate.processes:
        p.ignore_exit_code = p.nolog_exit_code = True
        if logger.getEffectiveLevel() <= 10:
            p.stdout_handlers.append(sys.stdout)
    activate.run()
    return activate.ok
Example #33
File: actions.py Project: lpouillo/vm5k
def activate_vms(vms, dest='lyon.grid5000.fr'):
    """Connect locally on every host and on all VMS to ping a host
    and update ARP tables"""
    logger.info('Executing ping from virtual machines on hosts')
    cmd = "VMS=`virsh list | grep -v State | grep -v -e '----' | awk '{print $2}'`; " + \
        "for VM in $VMS; do " + \
        " ssh $VM \"ping -c 3 " + dest + " \"; " + \
        "done"
    logger.debug('Launching ping probes to update ARP tables with %s', cmd)
    activate = TaktukRemote(cmd, list(set([vm['host'] for vm in vms])))
    for p in activate.processes:
        p.ignore_exit_code = p.nolog_exit_code = True
        if logger.getEffectiveLevel() <= 10:
            p.stdout_handlers.append(sys.stdout)
    activate.run()
    return activate.ok
Example #34
def dnsmasq_server(server, clients=None, vms=None, dhcp=True):
    """Configure a DHCP server with dnsmasq

    :param server: host where the server will be installed

    :param clients: list of hosts that will be declared in dnsmasq

    :param vms: list of virtual machines

    """
    logger.debug('Installing and configuring a DNS/DHCP server on %s', server)

    test_running = Process('nmap ' + server + ' -p 53 | grep domain')
    test_running.shell = True
    test_running.run()
    if 'open' in test_running.stdout:
        logger.info('DNS server already running, updating configuration')
    else:
        cmd = 'killall dnsmasq; export DEBIAN_MASTER=noninteractive ; ' + \
            'apt-get update ; apt-get -y purge dnsmasq-base ; ' + \
            'apt-get install -t wheezy -o Dpkg::Options::="--force-confdef" ' + \
            '-o Dpkg::Options::="--force-confnew" ' + \
            '-y dnsmasq; echo 1 > /proc/sys/net/ipv4/ip_forward '
        SshProcess(cmd, server).run()

    sites = list(
        set([
            get_host_site(client)
            for client in clients if get_host_site(client)
        ] + [get_host_site(server)]))
    add_vms(vms, server)
    if clients:
        kill_dnsmasq = TaktukRemote('killall dnsmasq', clients)
        for p in kill_dnsmasq.processes:
            p.ignore_exit_code = p.nolog_exit_code = True
        kill_dnsmasq.run()
        resolv_conf(server, clients, sites)

    if dhcp:
        sysctl_conf(server, vms)
        dhcp_conf(server, vms, sites)

    logger.debug('Restarting service ...')
    cmd = 'service dnsmasq stop ; rm /var/lib/misc/dnsmasq.leases ; ' + \
        'service dnsmasq start'
    SshProcess(cmd, server).run()
Example #35
    def configure_service_node(self):
        """Setup automatically a DNS server to access virtual machines by id
        and also install a DHCP server if kavlan is used"""
        if self.kavlan:
            service = 'DNS/DHCP'
            dhcp = True
        else:
            service = 'DNS'
            dhcp = False

        service_node = get_fastest_host(self.hosts)
        logger.info('Setting up %s on %s', style.emph(service),
                    style.host(service_node.split('.')[0]))
        clients = list(self.hosts)
        clients.remove(service_node)

        dnsmasq_server(service_node, clients, self.vms, dhcp)
Example #36
File: deployment.py Project: badock/vm5k
    def configure_service_node(self):
        """Setup automatically a DNS server to access virtual machines by id
        and also install a DHCP server if kavlan is used"""
        if self.kavlan:
            service = 'DNS/DHCP'
            dhcp = True
        else:
            service = 'DNS'
            dhcp = False

        service_node = get_fastest_host(self.hosts)
        logger.info('Setting up %s on %s', style.emph(service),
                    style.host(service_node.split('.')[0]))
        clients = list(self.hosts)
        clients.remove(service_node)

        dnsmasq_server(service_node, clients, self.vms, dhcp)
Example #37
    def stop_archi(self):
        logger.info("Stop the architecture!")
        clients = [self.clients]
        servers = [host for host in self.servers]
        MA = self.MA

        cmd = "killall client > /dev/null"
        a = Remote(cmd, clients, connection_params = root_connection_params).run()
        
        cmd = "cd /root/dietg/; ./unset_sed.sh"
        a = Remote(cmd, servers, connection_params = root_connection_params).run()
        
        cmd = "cd /root/dietg/; ./unset_masternode.sh"
        a = Remote(cmd, MA, connection_params = root_connection_params).run()
        
        logger.info("Done!")
        
        for host in servers:
            os.system('ssh-keygen -f "/home/dbalouek/.ssh/known_hosts" -R '+host+" 2> /dev/null")
Example #38
File: planning.py Project: msimonin/execo
def show_resources(resources,
                   msg='Resources',
                   max_resources=None,
                   queues='default'):
    """Print the resources in a fancy way"""
    if not max_resources:
        max_resources = {}
    total_hosts = 0
    log = style.log_header(msg) + '\n'

    for site in get_g5k_sites():
        site_added = False
        if site in resources:
            log += style.log_header(site).ljust(20) + ' ' + str(
                resources[site])
            if site in max_resources:
                log += '/' + str(max_resources[site])
            log += ' '
            site_added = True
        for cluster in get_site_clusters(site, queues=queues):
            if len(list(set(get_site_clusters(site)) & set(resources.keys()))) > 0 \
                    and not site_added:
                log += style.log_header(site).ljust(20)
                if site in max_resources:
                    log += '/' + str(max_resources[site])
                log += ' '
                site_added = True
            if cluster in resources:
                log += style.emph(cluster) + ': ' + str(resources[cluster])
                if cluster in max_resources:
                    log += '/' + str(max_resources[cluster])
                log += ' '
                total_hosts += resources[cluster]
        if site_added:
            log += '\n'
    if 'grid5000' in resources:
        log += style.log_header('Grid5000').ljust(20) + str(
            resources['grid5000'])
        if "grid5000" in max_resources:
            log += '/' + str(max_resources["grid5000"])
    elif total_hosts > 0:
        log += style.log_header('Total ').ljust(20) + str(total_hosts)
    logger.info(log)
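A hypothetical call with made-up counts, to illustrate the expected shape of the resources and max_resources dicts (keys are Grid'5000 site names, cluster names, or 'grid5000' for a global total):

resources = {'rennes': 12, 'paravance': 8, 'nancy': 4, 'grisou': 4}
max_resources = {'rennes': 64, 'nancy': 48}
show_resources(resources, msg='Planned resources', max_resources=max_resources)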
Example #39
File: dnsmasq.py Project: badock/vm5k
def dnsmasq_server(server, clients=None, vms=None, dhcp=True):
    """Configure a DHCP server with dnsmasq

    :param server: host where the server will be installed

    :param clients: list of hosts that will be declared in dnsmasq

    :param vms: list of virtual machines

    """
    logger.debug('Installing and configuring a DNS/DHCP server on %s', server)

    test_running = Process('nmap ' + server + ' -p 53 | grep domain')
    test_running.shell = True
    test_running.run()
    if 'open' in test_running.stdout:
        logger.info('DNS server already running, updating configuration')
    else:
        cmd = 'killall dnsmasq; export DEBIAN_MASTER=noninteractive ; ' + \
            'apt-get update ; apt-get -y purge dnsmasq-base ; ' + \
            'apt-get install -t wheezy -o Dpkg::Options::="--force-confdef" ' + \
            '-o Dpkg::Options::="--force-confnew" ' + \
            '-y dnsmasq; echo 1 > /proc/sys/net/ipv4/ip_forward '
        SshProcess(cmd, server).run()

    sites = list(set([get_host_site(client) for client in clients
                      if get_host_site(client)] + [get_host_site(server)]))
    add_vms(vms, server)
    if clients:
        kill_dnsmasq = TaktukRemote('killall dnsmasq', clients)
        for p in kill_dnsmasq.processes:
            p.ignore_exit_code = p.nolog_exit_code = True
        kill_dnsmasq.run()
        resolv_conf(server, clients, sites)

    if dhcp:
        sysctl_conf(server, vms)
        dhcp_conf(server, vms, sites)

    logger.debug('Restarting service ...')
    cmd = 'service dnsmasq stop ; rm /var/lib/misc/dnsmasq.leases ; ' + \
        'service dnsmasq start'
    SshProcess(cmd, server).run()
Example #40
File: deployment.py Project: badock/vm5k
 def _install_packages(self, other_packages=None, launch_disk_copy=True):
     """Installation of required packages on the hosts"""
     base_packages = 'uuid-runtime bash-completion taktuk locate htop init-system-helpers netcat-traditional'
     logger.info('Installing base packages \n%s', style.emph(base_packages))
     cmd = 'export DEBIAN_MASTER=noninteractive ; apt-get update && apt-get ' + \
         'install -y --force-yes --no-install-recommends ' + base_packages
     install_base = self.fact.get_remote(cmd, self.hosts).run()
     self._actions_hosts(install_base)
     if launch_disk_copy:
         self._start_disk_copy()
     libvirt_packages = 'libvirt-bin virtinst python2.7 python-pycurl python-libxml2 qemu-kvm nmap libgmp10'
     logger.info('Installing libvirt packages \n%s',
                 style.emph(libvirt_packages))
     cmd = 'export DEBIAN_MASTER=noninteractive ; apt-get update && apt-get install -y --force-yes '+\
         '-o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -t %s-backports ' % self.debian_name+\
         libvirt_packages
     install_libvirt = self.fact.get_remote(cmd, self.hosts).run()
     self._actions_hosts(install_libvirt)
     if other_packages:
         self._other_packages(other_packages)
Example #41
 def _install_packages(self, other_packages=None, launch_disk_copy=True):
     """Installation of required packages on the hosts"""
     base_packages = 'uuid-runtime bash-completion taktuk locate htop init-system-helpers netcat-traditional'
     logger.info('Installing base packages \n%s', style.emph(base_packages))
     cmd = 'export DEBIAN_MASTER=noninteractive ; apt-get update && apt-get ' + \
         'install -y --force-yes --no-install-recommends ' + base_packages
     install_base = self.fact.get_remote(cmd, self.hosts).run()
     self._actions_hosts(install_base)
     if launch_disk_copy:
         self._start_disk_copy()
     libvirt_packages = 'libvirt-bin virtinst python2.7 python-pycurl python-libxml2 qemu-kvm nmap libgmp10'
     logger.info('Installing libvirt packages \n%s',
                 style.emph(libvirt_packages))
     cmd = 'export DEBIAN_MASTER=noninteractive ; apt-get update && apt-get install -y --force-yes '+\
         '-o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -t wheezy-backports '+\
         libvirt_packages
     install_libvirt = self.fact.get_remote(cmd, self.hosts).run()
     self._actions_hosts(install_libvirt)
     if other_packages:
         self._other_packages(other_packages)
Example #42
    def start(self):
        logger.info('Put benchmarking files on hosts.')
        file_path = os.path.join(os.path.dirname(__file__),
                                 '../resources/unixbench-5.1.3.tgz')
        bench_copy = Put(self.bench_list.values(), [file_path], "/tmp/").run()

        logger.info('Start benchmarking on ' + str(len(self.bench_list)) + ' hosts.')
        bench_install = Remote('cd /tmp/ && ' +
                               'tar xvfz unixbench-5.1.3.tgz && ' +
                               'cd unixbench-5.1.3/ && ' +
                               './Run arithmetic && ' +
                               'cd ../ && ' +
                               'rm -rf unixbench-5.1.3/ && ' +
                               'rm -rf unixbench-5.1.3.tgz',
                               self.bench_list.values())

        for p in bench_install.processes:
            # Append ':port' only when the host actually defines a port
            host = p.host.address + (':' + str(p.host.port)
                                     if p.host.port is not None else '')
            p.stdout_handlers.append(end_forwarder_stdout_handler(host,
                                                                  self.callback))

        bench_install.start()
Example #43
    def start_clients(self):
        clients = [self.clients]
        servers = [host for host in self.servers]
            
        logger.info("Initialize client on node %s",clients)
        cmd = "cd "+sched_dir+"; make clean && make"
        a = Remote(cmd, clients, connection_params = root_connection_params).run()
        for s in a.processes:
            pout = s.stdout
        logger.debug(pout)
        cmd = "cd /root/dietg/; ./set_client.sh"
        a = Remote(cmd, clients, connection_params = root_connection_params).run()
        for s in a.processes:
            pout = s.stdout
        logger.debug(pout)
        
        cmd = "cd "+sched_dir+"; ./client_"+self.exp_size
        start = time.time()
        
        pause = 10
        if self.exp_size == "small":
            pause = 8
        elif self.exp_size == "regular":
            pause = 90 #90
        elif self.exp_size == "big":
            pause = 910
        self.task_distribution(len(self.servers),pause,cmd,work_rate = 2)
        #a = Remote(cmd, clients, connection_params = root_connection_params).run()
        
        end = time.time()
#         for s in a.processes:
#             pout = s.stdout
#         logger.info(pout)
        
        self.makespan = (end - start)

        logger.info("Done, check the logs!")

        return start, end
Example #44
File: api_utils.py Project: mickours/execo
def _is_cache_old_and_reachable(cache_dir):
    """Try to read the api_commit stored in the cache_dir and compare
    it with latest commit, return True if remote commit is different
    from cache commit"""
    try:
        with open(cache_dir + 'api_commit') as f:
            local_commit = f.readline()
    except:
        logger.detail('No commit version found')
        return True
    try:
        api_commit = get_resource_attributes('')['version']
    except:
        logger.warning('Unable to check API, reverting to cache')
        return False
    if local_commit != api_commit:
        logger.info('Cache is outdated, will retrieve the latest commit')
        return True
    else:
        logger.detail('Already at the latest commit')
        return False
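A sketch of how a caller might combine this check with the on-disk cache; _read_cache and _write_cache are hypothetical helpers standing in for the real cache handling, which is not part of this excerpt:

def get_api_data(cache_dir):
    # Hit the API only when the commit has moved on, otherwise
    # reuse what is already cached on disk.
    if _is_cache_old_and_reachable(cache_dir):
        data = _get_api()                   # full retrieval, as in Example #1
        _write_cache(cache_dir, data)       # hypothetical helper
    else:
        data = _read_cache(cache_dir)       # hypothetical helper
    return data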
Example #45
File: utils.py Project: badock/vm5k
def get_oargrid_job_vm5k_resources(oargrid_job_id):
    """Retrieve the hosts list and (ip, mac) list by sites from an
    oargrid_job_id and return the resources dict needed by vm5k_deployment,
    with kavlan-global if used in the oargrid job """
    oargrid_job_id = int(oargrid_job_id)
    logger.info('Waiting job start')
    wait_oargrid_job_start(oargrid_job_id)
    resources = get_oar_job_vm5k_resources([(oar_job_id, site)
                                            for oar_job_id, site in
                                            get_oargrid_job_oar_jobs(oargrid_job_id)])
    kavlan_global = None
    for site, res in resources.iteritems():
        if res['kavlan'] >= 10:
            kavlan_global = {'kavlan': res['kavlan'],
                             'ip_mac': resources[site]['ip_mac'],
                             'site': site}
            break
    if kavlan_global:
        resources['global'] = kavlan_global

    return resources
Example #46
File: utils.py Project: jonglezb/vm5k
def get_oargrid_job_vm5k_resources(oargrid_job_id):
    """Retrieve the hosts list and (ip, mac) list by sites from an
    oargrid_job_id and return the resources dict needed by vm5k_deployment,
    with kavlan-global if used in the oargrid job """
    oargrid_job_id = int(oargrid_job_id)
    logger.info('Waiting job start')
    wait_oargrid_job_start(oargrid_job_id)
    resources = get_oar_job_vm5k_resources([(oar_job_id, site)
                                            for oar_job_id, site in
                                            get_oargrid_job_oar_jobs(oargrid_job_id)])
    kavlan_global = None
    for site, res in resources.iteritems():
        if res['kavlan'] >= 10:
            kavlan_global = {'kavlan': res['kavlan'],
                             'ip_mac': resources[site]['ip_mac'],
                             'site': site}
            break
    if kavlan_global:
        resources['global'] = kavlan_global

    return resources
Example #47
File: planning.py Project: msimonin/execo
def get_job_by_name(job_name, sites=None):
    """Return the current oargrid or OAR job whose name matches job_name,
    as (oargrid_job_id, None) or (oar_job_id, site), else (None, None)."""
    logger.detail('Looking for a job named %s', style.emph(job_name))
    if not sites:
        sites = get_g5k_sites()
    oargrid_jobs = get_current_oargrid_jobs()
    if len(oargrid_jobs) > 0:
        for g_job in oargrid_jobs:
            for job in get_oargrid_job_oar_jobs(g_job):
                info = get_oar_job_info(job[0], job[1])
                if info['name'] == job_name:
                    logger.info('Oargridjob %s found !', style.emph(g_job))
                    return g_job, None
    running_jobs = get_current_oar_jobs(sites)
    for job in running_jobs:
        info = get_oar_job_info(job[0], job[1])
        if info['name'] == job_name:
            logger.info('Job %s found on site %s !', style.emph(job[0]),
                        style.host(job[1]))
            return job
    return None, None
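A hypothetical lookup, assuming a reservation was submitted with the name my_experiment; the function returns (oargrid_job_id, None) for a grid job, (oar_job_id, site) for a single-site job, and (None, None) when nothing matches:

job_id, site = get_job_by_name('my_experiment', sites=['rennes', 'nancy'])
if job_id is None:
    logger.error('No running job named my_experiment')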
Example #48
def _is_cache_old_and_reachable(cache_dir):
    """Try to read the api_commit stored in the cache_dir and compare
    it with latest commit, return True if remote commit is different
    from cache commit"""
    try:
        with open(cache_dir + 'api_commit') as f:
            local_commit = f.readline()
    except:
        logger.detail('No commit version found')
        return True
    try:
        api_commit = get_resource_attributes('')['version']
    except:
        logger.warning('Unable to check API, reverting to cache')
        return False
    if local_commit != api_commit:
        logger.info('Cache is outdated, will retrieve the latest commit')
        return True
    else:
        logger.detail('Already at the latest commit')
        return False
Example #49
def setup_aptcacher_server(hosts, base_dir='/tmp/apt-cacher-ng'):
    """Install and configure apt-cacher on one server"""
    hosts = map(Host, hosts)
    logger.info('Installing apt-cacher on %s',
                ','.join([style.host(host.address) for host in hosts]))
    logger.detail('Package')
    package = TaktukRemote(
        'export DEBIAN_MASTER=noninteractive ; apt-get update ; ' +
        'apt-get install -o Dpkg::Options::="--force-confdef" -o ' +
        'Dpkg::Options::="--force-confnew" -y apt-cacher-ng', hosts).run()
    if not package.ok:
        logger.error('Unable to install apt-cacher-ng on %s',
                     ','.join([host.address for host in hosts]))
        return

    logger.detail('Directory creation')
    log_dir = base_dir + '/log'
    cache_dir = base_dir + '/cache'
    mkdirs = TaktukRemote(
        'mkdir -p ' + log_dir + '; mkdir -p ' + cache_dir +
        '; chown -R apt-cacher-ng:apt-cacher-ng ' + base_dir, hosts).run()
    if not mkdirs.ok:
        logger.error('Unable to create the directories')
        return
    cmd = 'sed -i "s#/var/cache/apt-cacher-ng#' + cache_dir + \
          '#g" /etc/apt-cacher-ng/acng.conf ;' + \
          'sed -i "s#/var/log/apt-cacher-ng#' + log_dir + '#g" ' + \
          '/etc/apt-cacher-ng/acng.conf ;' + \
          'sed -i "s/3142/9999/g" /etc/apt-cacher-ng/acng.conf ; ' + \
          'sed -i "s?#Proxy: http://www-proxy.example.net:80?Proxy: ' + \
          'http://proxy:3128?g" /etc/apt-cacher-ng/acng.conf ; ' + \
          'service apt-cacher-ng restart'
    configure = TaktukRemote(cmd, hosts).run()
    if not configure.ok:
        logger.error('Unable to configure and restart the service')
        return

    logger.info('apt-cacher-ng up and running on %s',
                ','.join([style.host(host.address) for host in hosts]))
Example #50
File: aptcacher.py Project: badock/vm5k
def setup_aptcacher_server(hosts, base_dir='/tmp/apt-cacher-ng'):
    """Install and configure apt-cacher on one server"""
    hosts = map(Host, hosts)
    logger.info('Installing apt-cacher on %s',
                ','.join([style.host(host.address) for host in hosts]))
    logger.detail('Package')
    package = TaktukRemote('export DEBIAN_MASTER=noninteractive ; apt-get update ; ' +
                           'apt-get install -o Dpkg::Options::="--force-confdef" -o ' +
                           'Dpkg::Options::="--force-confnew" -y apt-cacher-ng',
                           hosts).run()
    if not package.ok:
        logger.error('Unable to install apt-cacher-ng on %s',
                     ','.join([host.address for host in hosts]))
        return

    logger.detail('Directory creation')
    log_dir = base_dir + '/log'
    cache_dir = base_dir + '/cache'
    mkdirs = TaktukRemote('mkdir -p ' + log_dir + '; mkdir -p ' + cache_dir +
                          '; chown -R apt-cacher-ng:apt-cacher-ng ' + base_dir,
                          hosts).run()
    if not mkdirs.ok:
        logger.error('Unable to create the directories')
        return
    cmd = 'sed -i "s#/var/cache/apt-cacher-ng#' + cache_dir + \
          '#g" /etc/apt-cacher-ng/acng.conf ;' + \
          'sed -i "s#/var/log/apt-cacher-ng#' + log_dir + '#g" ' + \
          '/etc/apt-cacher-ng/acng.conf ;' + \
          'sed -i "s/3142/9999/g" /etc/apt-cacher-ng/acng.conf ; ' + \
          'sed -i "s?#Proxy: http://www-proxy.example.net:80?Proxy: ' + \
          'http://proxy:3128?g" /etc/apt-cacher-ng/acng.conf ; ' + \
          'service apt-cacher-ng restart'
    configure = TaktukRemote(cmd, hosts).run()
    if not configure.ok:
        logger.error('Unable to configure and restart the service')
        return

    logger.info('apt-cacher-ng up and running on %s',
                ','.join([style.host(host.address) for host in hosts]))
Example #51
    def _create_backing_file(self, disks=None):
        """Create the backing disk files on the hosts and copy the root
        ssh key into them."""
        if not self.copy_actions:
            self._start_disk_copy(disks)
        if not self.copy_actions.ended:
            logger.info("Waiting for the end of the disks copy")
            self.copy_actions.wait()
        if isinstance(self.copy_actions, ParallelActions):
            mv_actions = []
            for act in self.copy_actions.actions:
                fname = act.local_files[0].split('/')[-1]
                mv_actions.append(
                    self.fact.get_remote(
                        "mv /tmp/" + fname + " /tmp/orig_" + fname,
                        self.hosts))

            mv = ParallelActions(mv_actions).run()

        if not disks:
            disks = self.backing_files
        for bf in disks:
            raw_disk = '/tmp/orig_' + bf.split('/')[-1]
            to_disk = '/tmp/' + bf.split('/')[-1]
            self.fact.get_remote('cp ' + raw_disk + ' ' + to_disk,
                                 self.hosts).run()
            logger.info('Copying ssh key on ' + to_disk + ' ...')
            cmd = 'modprobe nbd max_part=16; ' + \
                'qemu-nbd --connect=/dev/nbd0 ' + to_disk + \
                ' ; sleep 3 ; partprobe /dev/nbd0 ; ' + \
                'part=`fdisk -l /dev/nbd0 |grep dev|grep Linux| grep -v swap|cut -f 1 -d " "` ; ' + \
                'mount $part /mnt ; mkdir -p /mnt/root/.ssh ; ' + \
                'cat /root/.ssh/authorized_keys >> /mnt/root/.ssh/authorized_keys ; ' + \
                'cp -r /root/.ssh/id_rsa* /mnt/root/.ssh/ ;' + \
                'umount /mnt; qemu-nbd -d /dev/nbd0'
            logger.detail(cmd)
            copy_on_vm_base = self.fact.get_remote(cmd, self.hosts).run()
            self._actions_hosts(copy_on_vm_base)
Example #52
 def start_MA(self):
     hostname = self.MA
     logger.info("Initialize Master Agent on node %s",hostname)
     logger.debug("Compile the executables")
     cmd = "cd "+sched_dir+"; make clean && make"
     a = Remote(cmd, hostname, connection_params = root_connection_params).run()
     for s in a.processes:
         pout = s.stderr
     
     logger.info("Chosen scheduler is : %s",self.scheduler)
             
     cmd = "cd /root/dietg/; ./set_masternode.sh"
     a = Remote(cmd, hostname, connection_params = root_connection_params).start()
     for s in a.processes:
         pout = s.stdout
     logger.info(pout)
     
     logger.info("Done!")
Example #53
    def _launch_kadeploy(self, max_tries=1, check_deploy=True):
        """Create a execo_g5k.Deployment object, launch the deployment and
        return a tuple (deployed_hosts, undeployed_hosts)
        """
        logger.info('Deploying %s hosts \n%s', len(self.hosts),
                    hosts_list(self.hosts))
        deployment = Deployment(
            hosts=[Host(canonical_host_name(host)) for host in self.hosts],
            env_file=self.env_file,
            env_name=self.env_name,
            user=self.env_user,
            vlan=self.kavlan)
        # Activate kadeploy output log if log level is debug
        if logger.getEffectiveLevel() <= 10:
            stdout = [sys.stdout]
            stderr = [sys.stderr]
        else:
            stdout = None
            stderr = None

        deployed_hosts, undeployed_hosts = deploy(
            deployment,
            stdout_handlers=stdout,
            stderr_handlers=stderr,
            num_tries=max_tries,
            check_deployed_command=check_deploy)
        deployed_hosts = list(deployed_hosts)
        undeployed_hosts = list(undeployed_hosts)
        # Renaming hosts if a kavlan is used
        if self.kavlan:
            for i, host in enumerate(deployed_hosts):
                deployed_hosts[i] = get_kavlan_host_name(host, self.kavlan)
            for i, host in enumerate(undeployed_hosts):
                undeployed_hosts[i] = get_kavlan_host_name(host, self.kavlan)
        logger.info('Deployed %s hosts \n%s', len(deployed_hosts),
                    hosts_list(deployed_hosts))
        cr = '\n' if len(undeployed_hosts) > 0 else ''
        logger.info('Failed %s hosts %s%s', len(undeployed_hosts), cr,
                    hosts_list(undeployed_hosts))
        self._update_hosts_state(deployed_hosts, undeployed_hosts)
        return deployed_hosts, undeployed_hosts
Example #54
_json_dir = environ['HOME'] + '/.execo/topology/'
try:
    mkdir(_json_dir)
except:
    pass

logger.setLevel('WARNING')
g = g5k_graph([site])
logger.setLevel('INFO')
update_needed = True
try:
    with open(_json_dir + site + '.json', 'r') as infile:
        old_json = json.load(infile)
    g_old = json_graph.node_link_graph(old_json)
    if is_isomorphic(g, g_old):
        logger.info('No change in graph since last map generation')
        update_needed = False
except:
    logger.info('No old json file')

if update_needed:
    logger.info('Updating wiki image and json cache')
    pagename = site.title() + ' Network Topology'
    text = "This page is generated automatically from the Network API " + \
        "and shows you the topology of [[" + site.title() + ":Network]].\n\n" + \
        "[[File:topo_" + site + ".png|500px|center]]"
    website = wiki.Wiki("http://140.77.13.123/mediawiki/api.php")
    website.login('lolo', password='******')
    topo = page.Page(website, pagename)
Example #55
File: planning.py Project: msimonin/execo
def draw_slots(slots, colors=None, show=False, save=True, outfile=None):
    """Draw the number of nodes available for the clusters (requires Matplotlib >= 1.2.0)

    :param slots: a list of slot, as returned by ``compute_slots``

    :param colors: a dict to define element coloring ``{'element': (255., 122., 122.)}``

    :param show: display the slots versus time

    :param save: save the plot to outfile

    :param outfile: specify the output file"""

    startstamp = slots[0][0]
    endstamp = slots[-1][1]
    if outfile is None:
        outfile = 'slots_' + format_date(startstamp)

    logger.info('Saving slots diagram to %s', style.emph(outfile))

    if colors is None:
        colors = _set_colors()

    xfmt = MD.DateFormatter('%d %b, %H:%M ')

    if endstamp - startstamp <= timedelta_to_seconds(timedelta(days=7)):
        x_major_locator = MD.HourLocator(byhour=[9, 19])
    elif endstamp - startstamp <= timedelta_to_seconds(timedelta(days=17)):
        x_major_locator = MD.HourLocator(byhour=[9])
    else:
        x_major_locator = MD.AutoDateLocator()

    max_nodes = {}
    total_nodes = 0
    slot_limits = []
    total_list = []
    i_slot = 0
    for slot in slots:
        slot_limits.append(slot[0])
        if i_slot + 1 < len(slots):
            slot_limits.append(slots[i_slot + 1][0])
            i_slot += 1

        for element, n_nodes in slot[2].items():
            if element in get_g5k_clusters(queues=None):
                if not element in max_nodes:
                    max_nodes[element] = []
                max_nodes[element].append(n_nodes)
                max_nodes[element].append(n_nodes)
            if element == 'grid5000':
                total_list.append(n_nodes)
                total_list.append(n_nodes)
                if n_nodes > total_nodes:
                    total_nodes = n_nodes

    slot_limits.append(endstamp)
    slot_limits.sort()

    dates = [unixts_to_datetime(ts) for ts in slot_limits]

    datenums = MD.date2num(dates)

    fig = PLT.figure(figsize=(15, 10), dpi=80)

    ax = PLT.subplot(111)
    ax.xaxis_date()
    box = ax.get_position()
    ax.set_position([box.x0 - 0.07, box.y0, box.width, box.height])
    ax.set_xlim(unixts_to_datetime(startstamp), unixts_to_datetime(endstamp))
    ax.set_xlabel('Time')
    ax.set_ylabel('Nodes available')
    ax.set_ylim(0, total_nodes * 1.1)
    ax.axhline(y=total_nodes,
               color='#000000',
               linestyle='-',
               linewidth=2,
               label='ABSOLUTE MAXIMUM')
    ax.yaxis.grid(color='gray', linestyle='dashed')
    ax.xaxis.set_major_formatter(xfmt)
    ax.xaxis.set_major_locator(x_major_locator)
    PLT.xticks(rotation=15)

    max_nodes_list = []

    p_legend = []
    p_rects = []
    p_colors = []
    for key, value in sorted(max_nodes.items()):
        if key != 'grid5000':
            max_nodes_list.append(value)
            p_legend.append(key)
            p_rects.append(PLT.Rectangle((0, 0), 1, 1, fc=colors[key]))
            p_colors.append(colors[key])

    plots = PLT.stackplot(datenums, max_nodes_list, colors=p_colors)
    PLT.legend(p_rects,
               p_legend,
               loc='center right',
               ncol=1,
               shadow=True,
               bbox_to_anchor=(1.2, 0.5))

    if show:
        PLT.show()
    if save:
        logger.debug('Saving file %s ...', outfile)
        PLT.savefig(outfile, dpi=300)
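A usage sketch, assuming the slots were produced by compute_slots as mentioned in the docstring; the planning call and walltime are illustrative only:

# Hypothetical values: retrieve a planning, compute one-hour slots, draw them.
planning = get_planning()
slots = compute_slots(planning, walltime='1:00:00')
draw_slots(slots, outfile='slots_next_week.png')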
Example #56
def print_step(step_desc=None):
    """Print an on_magenta coloured string"""
    logger.info(style.step('* ' + step_desc).ljust(45))
Example #57
#                 help="Run the injector after generating events")
# optmodes.add_argument('--prepare-and-run',
#                       T)

logger.setLevel('INFO')

default_connection_params['user'] = '******'

vms = {}
f = open('vms.list')
for line in f:
    ip, vm = line.strip().split('\t')
    vms[vm] = ip
f.close()

logger.info('Installing cpulimit on all VMs')
apt = TaktukRemote('apt-get install -y cpulimit', vms.values()).run()

logger.info('Copying memtouch on all vms')
copy_stress = TaktukPut(vms.values(), ['memtouch-with-busyloop3']).run()

logger.info('Killing all memtouch processes')
kill_stress = TaktukRemote('killall memtouch-with-busyloop3', vms.values())
for p in kill_stress.processes:
    p.shell = True
    p.nolog_exit_code = p.ignore_exit_code = True
kill_stress.run()

logger.info('Starting memtouch process')
cmd = './memtouch-with-busyloop3 --cmd-makeload ' +\
    '--cpu-speed 304408.621872 --mem-speed 63235516.087661 128 128'
Example #58
source_folder = os.getcwd()
print len(sys.argv)

if len(sys.argv) > 1:
    script, xml_folder, fusion_folder = argv
else:
    xml_folder = "."
    fusion_folder = "/home/ftp/Musique/fusion/"

#Initialize the threads
threads = []
maxconnections = 20  #Maximum of parallel download
pool_sema = BoundedSemaphore(value=maxconnections)

os.chdir(xml_folder)
logger.info("Target folder = %s", os.getcwd())
logger.info("Looking for XML files...")
cpt_files = 0
for xml_file in os.listdir(xml_folder):
    #For each xml file, create a thread
    if xml_file.endswith(".xml"):
        current = thread_download(xml_file)
        threads.append(current)
        current.start()

for t in threads:
    t.join()
    logger.debug("A thread is done!")

os.chdir(source_folder)
#We got back to the folder of the script
Example #59
    def run(self):
        tree = ET.parse(self.xml_file)
        root = tree.getroot()
        genre = self.xml_file.split(".")[0]
        xml_folder = fusion_folder + "/" + genre
        try:
            os.mkdir(xml_folder)
        except OSError:
            logger.debug("The folder %s already exists", xml_folder)
        if root.tag == genre:
            logger.debug("%s.xml is a valid file!", genre)
            #cpt_files +=1
            dl_cpt = 0
            total = 0
            current_cpt = 0
            for child in root:
                if hasBeenDownloaded(child) == False:
                    total += 1

            for child in root:
                #child_cpt = 0
                if hasBeenDownloaded(child) == False:
                    artist = safe_unicode(child[0].text).encode('utf-8')
                    titre = safe_unicode(child[1].text).encode('utf-8')

                    replacements(artist)
                    replacements(titre)
                    logger.debug("Song to download: %s - %s", artist, titre)
                    logger.debug("Download: %d / Current: %d / Total:%d",
                                 dl_cpt, current_cpt, total)
                    dl = mp3juices.search(artist, titre, xml_folder)
                    current_cpt += 1
                    logger.debug("xml_folder = %s", xml_folder)

                    #Management of timeout and socket errors
                    try:
                        pool_sema.acquire()
                        nb_res = dl.download()
                        pool_sema.release()
                    except socket.error as msg:
                        nb_res = -1
                        continue
                    except KeyboardInterrupt:
                        nb_res = -1
                        raise
                    if nb_res > 0:
                        logger.debug("[%s] Found a link for the song! %s - %s",
                                     genre, artist, titre)
                        setAsDownloaded(child)
                        logger.info(
                            "Song tagged as downloaded (%s): %s - %s",
                            hasBeenDownloaded(child), artist, titre)
                        dl_cpt += 1
                    else:
                        logger.debug(
                            "[%s] Could not find any link for the song! %s - %s",
                            genre, artist, titre)
                    logger.info("[%s] Download: %d / Current: %d / Total:%d",
                                genre, dl_cpt, current_cpt, total)
            tree.write(self.xml_file)
        return