Example No. 1
    def workflow(self, comb):
        """Compile aevol for a given parameter combination"""
        comb_ok = False
        logger.info(slugify(comb) + ' starts to compile')
        try:
            export = "source /opt/intel/bin/compilervars.sh intel64; "

            src_directory = "/home/arrouan/workspace/aevol/git/world/aevol/"
            bin_directory = "/home/arrouan/workspace/aevol/compiled_binary/"

            configure_option = "--with-tracing --without-x"

            if comb['parallel'] == 'tbb':
                configure_option += " --with-tbb"

            if comb['blas'] == 'openblas':
                configure_option += " --with-blas"
            elif comb['blas'] == 'mkl':
                configure_option += " --with-mkl"
            elif comb['blas'] == 'atlas':
                configure_option += " --with-atlas"

            if comb['experiment'] == 'raevol':
                configure_option += " --with-raevol"

            if comb['compilator'] == 'intel':
                configure_option += " CXX=icc"

            full_bin_directory = bin_directory + comb['experiment'] + '_' + \
                comb['compilator'] + '_' + comb['parallel'] + '_' + comb['blas']

            try:
                os.mkdir(full_bin_directory)
            except OSError:
                # The directory already exists: empty it before reuse
                for f in os.listdir(full_bin_directory):
                    os.remove(full_bin_directory + "/" + f)

            p = Process(export + 'cd ' + src_directory + '; autoreconf' +
                        '; ./configure ' + configure_option +
                        '; make clean; make' +
                        '; cp src/aevol_run ' + full_bin_directory + '/' +
                        '; cp src/aevol_create ' + full_bin_directory + '/')
            p.shell = True
            p.run()

            print(p.stdout)

            comb_ok = True
        finally:
            if comb_ok:
                self.sweeper.done(comb)
                logger.info(slugify(comb) + ' has been done')
            else:
                self.sweeper.cancel(comb)
                logger.warning(slugify(comb) + ' has been canceled')
        logger.info(style.step('%s Remaining'),
                    len(self.sweeper.get_remaining()))
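
The combination dictionary read above uses four keys: experiment, compilator, parallel and blas. A minimal sketch of a matching parameter sweep, assuming execo_engine's sweep helper; only the values tested in the branches above come from the code, the other values are illustrative placeholders:

from execo_engine import sweep

# Hypothetical parameter space; 'raevol', 'intel', 'tbb', 'openblas', 'mkl'
# and 'atlas' appear in the code above, the remaining values are placeholders.
parameters = {
    'experiment': ['aevol', 'raevol'],
    'compilator': ['gcc', 'intel'],
    'parallel': ['none', 'tbb'],
    'blas': ['none', 'openblas', 'mkl', 'atlas'],
}
combs = sweep(parameters)  # one dict per combination, each usable as 'comb' above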
Example No. 2
def get_server_ip(host):
    """Get the server IP"""
    if isinstance(host, Host):
        host = host.address
    logger.debug('Retrieving IP from %s', style.host(host))
    get_ip = Process('host ' + host + ' |cut -d \' \' -f 4')
    get_ip.shell = True
    get_ip.run()
    ip = get_ip.stdout.strip()
    return ip
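
A minimal usage sketch for the helper above, assuming execo's Host class and a reachable hostname (the node name is a placeholder):

from execo import Host

# Hypothetical call; 'node-1.example.org' is a placeholder hostname.
server = Host('node-1.example.org')
ip = get_server_ip(server)  # a plain hostname string also works
logger.debug('Server IP: %s', ip)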
Example No. 3
    def create_diet_architecture_files(self):
        logger.info("Create a DIET architecture")
        # Architecture without LA
        process = Process("./dietg/set_archi_diet_4.sh gridnodes " +
                          str(self.nodes_service[0]))
        process.run()

        MA_file = "./dietg/cfgs/MA1.cfg"
        SeD_file = ['./dietg/cfgs/server.cfg']
        logger.info("Create MA file")
        set_scheduler(MA_file, self.scheduler)
        logger.info("Create SeD files")
        for file2 in SeD_file:
            # print file2
            set_parallel_jobs(file2, self.concLimit)
Example No. 4
    def get_logs_from_server(self):
        distant_file = "/root/MA.stat"
        local_folder = "./results_" + self.oargrid_job_id + "_" + self.scheduler + "/"

        nb_files = 0
#         Get(self.servers, [distant_file])
        for host in self.servers:
            local_file = local_folder + host + "_" + self.scheduler + "_SeD.stat"
            process = Process("scp root@" + host + ":" + distant_file + " " + local_file)
            process.run()
            try:  # if the file exists
                with open(local_file):
                    nb_files += 1
            except IOError:  # if the file does not exist
                pass
        return nb_files
Example No. 5
def dnsmasq_server(server, clients=None, vms=None, dhcp=True):
    """Configure a DHCP server with dnsmasq

    :param server: host where the server will be installed

    :param clients: list of hosts that will be declared in dnsmasq

    :param vms: list of virtual machines

    """
    logger.debug('Installing and configuring a DNS/DHCP server on %s', server)

    test_running = Process('nmap ' + server + ' -p 53 | grep domain')
    test_running.shell = True
    test_running.run()
    if 'open' in test_running.stdout:
        logger.info('DNS server already running, updating configuration')
    else:
        cmd = 'killall dnsmasq; export DEBIAN_FRONTEND=noninteractive ; ' + \
            'apt-get update ; apt-get -y purge dnsmasq-base ; ' + \
            'apt-get install -t wheezy -o Dpkg::Options::="--force-confdef" ' + \
            '-o Dpkg::Options::="--force-confnew" ' + \
            '-y dnsmasq; echo 1 > /proc/sys/net/ipv4/ip_forward '
        SshProcess(cmd, server).run()

    sites = list(
        set([
            get_host_site(client)
            for client in clients if get_host_site(client)
        ] + [get_host_site(server)]))
    add_vms(vms, server)
    if clients:
        kill_dnsmasq = TaktukRemote('killall dnsmasq', clients)
        for p in kill_dnsmasq.processes:
            p.ignore_exit_code = p.nolog_exit_code = True
        kill_dnsmasq.run()
        resolv_conf(server, clients, sites)

    if dhcp:
        sysctl_conf(server, vms)
        dhcp_conf(server, vms, sites)

    logger.debug('Restarting service ...')
    cmd = 'service dnsmasq stop ; rm /var/lib/misc/dnsmasq.leases ; ' + \
        'service dnsmasq start'
    SshProcess(cmd, server).run()
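
A possible invocation of dnsmasq_server, with Grid'5000-style node names used purely as placeholders:

# Hypothetical call: one service node, two clients, plus the 'vms' list
# described in the docstring.
service_node = 'node-1.site.grid5000.fr'
client_nodes = ['node-2.site.grid5000.fr', 'node-3.site.grid5000.fr']
dnsmasq_server(service_node, clients=client_nodes, vms=vms, dhcp=True)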
Example No. 6
    def get_nb_tasks_server(self):
        distant_file = "/root/dietg/log/total.jobs"
        local_file = "./task_counter"

#         Get(self.servers, [distant_file])
        for host in self.servers:
            process = Process("scp root@" + host + ":" + distant_file + " " + local_file)
            process.run()
            try:  # if the file exists
                with open(local_file):
                    self.nb_tasks[host] = get_nb_tasks(local_file)
            except IOError:  # if the file does not exist
                self.nb_tasks[host] = 0
            try:
                os.remove(local_file)
            except OSError:
                pass
Example No. 7
    def clean_archi(self):
        """ Delete all files related to an existing DIET archi """
        logger.info("Clean DIET architecture")
        process = Process("./dietg/clean_archi_diet.sh")
        process.run()
        process = Process("./dietg/clean.sh")
        process.run()
        process = Process("if [ -e ./tmp ]; then rm ./tmp; fi")
        process.run()
Example No. 8
    def workflow(self, comb):
        """
            Compute one application launch 
            using a given parameter group
        """
        comb_ok = False
        try:
            # Generate configuration file needed by MPI processes
            logger.info("Generating assembly file...")
            py = comb['cores'] / comb['px']
            prepare = Process('cd %s && python %s %d %d %d %d %d %s app.lad' % 
                (self.workingPath, self.genLadScript, comb['datasize'], comb['datasize'], 
                    comb['datasize'], comb['px'], py, comb['transposition']))
            prepare.shell = True
            prepare.run()

            # Generate the MPI host file
            mfile = self.generate_machine_file()

            # Start L2C
            lad = "./app.lad"
            logger.info("Computing...")
            res = Process("export OAR_JOB_KEY_FILE=~/.oar_key ; cd %s && l2c_loader -M,-machinefile,%s --mpi -c %d %s" % (self.workingPath, mfile, comb['cores'], lad))
            res.shell = True
            res.stdout_handlers.append(os.path.join(self.result_dir, slugify(comb) + '.out'))
            res.stdout_handlers.append(sys.stdout)
            res.stderr_handlers.append(os.path.join(self.result_dir, slugify(comb) + '.err'))
            res.stderr_handlers.append(sys.stderr)
            res.run()
            if not res.ok:
                logger.error('Bad L2C termination')
                raise Exception('Bad L2C termination')
            if len(res.stderr) > 0: # WARNING: when L2C cannot find the LAD file or something strange like this
                logger.warning('Not empty error output')

            # Clean configuration files
            logger.info("Removing assembly files...")
            res = Process('cd %s && rm -f app.lad*' % self.workingPath)
            res.shell = True
            res.run()
                
            comb_ok = True
        except Exception:
            pass
        finally:
            if comb_ok:
                self.sweeper.done(comb)
                logger.info(style.host(slugify(comb)) + ' has been done')
            else:
                self.sweeper.cancel(comb)
                logger.warning(style.host(slugify(comb)) + ' has been canceled')
        
            logger.info(style.step('%s Remaining'),
                        len(self.sweeper.get_remaining()))
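
This method is meant to be driven by the engine's ParamSweeper. A minimal sketch of such a driver loop, assuming execo_engine's ParamSweeper and sweep as it might appear in the engine's run method; the parameter values below are placeholders, not taken from the experiment:

from execo_engine import ParamSweeper, sweep

# Hypothetical driver loop (inside the engine's run() method);
# the parameter values are placeholders.
parameters = {'cores': [16, 32], 'px': [2, 4],
              'datasize': [256, 512], 'transposition': ['SR', 'SC']}
self.sweeper = ParamSweeper(os.path.join(self.result_dir, 'sweeps'),
                            sweep(parameters))
comb = self.sweeper.get_next()
while comb:
    self.workflow(comb)
    comb = self.sweeper.get_next()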
Example No. 9
# Count the hosts listed in hosts.list
n_host = 0
f = open('hosts.list')
for line in f:
    n_host += 1
f.close()
sed_time = Process('sed -i "s/simulator.duration.*/simulator.duration = ' + str(1000) +
                   '/g" load_events_generator/config/simulator.properties').run()
sed_nodes = Process('sed -i "s/nodes.number.*/nodes.number = ' + str(n_host) +
                    '/g" load_events_generator/config/simulator.properties').run()
sed_vms = Process('sed -i "s/vm.number.*/vm.number = ' + str(len(vms)) +
                  '/g" load_events_generator/config/simulator.properties').run()

logger.info('Generating events list')
gen_events = Process('cd load_events_generator ; ' +
      'java -jar load_events_generator.jar vms.list > ../events_load.xml')
gen_events.shell = True
gen_events.run()

tree = ET.parse('events_load.xml')
root = tree.getroot()
events = {}
for event in root.findall('./event'):
    events[int(round(float(event.get('time'))))] = {'vm': event.get('target'),
                                                    'load': event.get('value')}

 
def set_cpu_load(load, vm_ip, pid):
    """Use cpulimit to change process intensity on vm"""
    logger.info('kill cpu_limit on %s and set it to %s', vm_ip, load)
    kill_cpu_limit = SshProcess('ps aux| grep "cpulimit" | grep -v "grep" | awk \'{print $2}\' | xargs -r kill -9',
                                vm_ip).run()
    start_cpu_limit = SshProcess('cpulimit -p ' + str(pid) + ' -l ' + str(load), vm_ip)
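
The events dictionary built above maps a time offset to a target VM and a CPU load value. A hypothetical replay loop could look like the sketch below; get_pid_on_vm is a placeholder, since the snippet does not show how the target process id is obtained:

import time

# Hypothetical replay of the load events in time order;
# get_pid_on_vm() is a placeholder helper, not part of the snippet.
start = time.time()
for t in sorted(events):
    time.sleep(max(0, t - (time.time() - start)))
    ev = events[t]
    set_cpu_load(ev['load'], ev['vm'], get_pid_on_vm(ev['vm']))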