Code Example #1
# Assumed imports (the snippet omits them): execo / execo_g5k / networkx
import sys
import json
from os import environ, mkdir
from argparse import ArgumentParser
from networkx import is_isomorphic
from networkx.readwrite import json_graph
from execo import logger
from execo_g5k import get_g5k_sites
from execo_g5k.topology import g5k_graph

parser = ArgumentParser(prog=sys.argv[0],
                        description='Update topology maps on the' +
                        ' Grid\'5000 wiki')
parser.add_argument('site', help='Choose the site')
args = parser.parse_args()
site = args.site
if site not in get_g5k_sites():
    logger.error('%s is not a valid G5K site', site)
    sys.exit(1)

_json_dir = environ['HOME'] + '/.execo/topology/'
try:
    mkdir(_json_dir)
except OSError:
    # directory already exists
    pass

logger.setLevel('WARNING')
g = g5k_graph([site])
logger.setLevel('INFO')
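# Compare the new graph with the one saved by the previous run; only
# regenerate the maps if the topology has changed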
update_needed = True
try:
    with open(_json_dir + site + '.json', 'r') as infile:
        old_json = json.load(infile)
    g_old = json_graph.node_link_graph(old_json)
    if is_isomorphic(g, g_old):
        logger.info('No change in graph since last map generation')
        update_needed = False
except (IOError, ValueError):
    logger.info('No old json file')

if update_needed:
Code Example #2
File: download.py Project: chocoyaki/fusion
import unicodedata
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import SubElement
from xml.dom import minidom
import datetime
from time import sleep
import sys
import logging
from execo import logger
import socket

import urllib

from threading import Thread, BoundedSemaphore

logger.setLevel(logging.INFO)

import mp3juices


def strip_accents(s):
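    # Decompose to NFD so accents become separate combining marks
    # (category 'Mn'), then drop those marks to strip the accents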
    s = s.decode('unicode-escape')
    return ''.join(c for c in unicodedata.normalize('NFD', s)
                   if unicodedata.category(c) != 'Mn')


# create a subclass and override the handler methods
def safe_unicode(obj, *args):
    """ return the unicode representation of obj """
    try:
        return unicode(obj, *args)
    except UnicodeDecodeError:
        # assumed completion (the snippet is truncated here): fall back to
        # a byte-escaped representation of the object
        return unicode(str(obj).encode('string_escape'))
Code Example #3
from execo import configuration, logger, Put, Process, Remote, Get, Host
#from execo.log import set_style
from diet_utils import getNodesfromFile, set_scheduler, set_parallel_jobs,\
    get_g5k_api_measures, get_nb_tasks, writeNodesToFile
from execo_g5k.api_utils import get_host_site, get_host_attributes
import sys
import os
import time

import logging
from time import gmtime, strftime

logger.setLevel(logging.DEBUG)

sched_dir = "/root/dietg/diet-sched-example/"
cfgs_dir = "/root/dietg/cfgs/"
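# Non-interactive SSH options for root connections to the deployed nodes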
root_connection_params = {
    'user': '******',
    'ssh_options': ('-tt',
                    '-o', 'BatchMode=yes',
                    '-o', 'PasswordAuthentication=no',
                    '-o', 'StrictHostKeyChecking=no',
                    '-o', 'UserKnownHostsFile=/dev/null',
                    '-o', 'ConnectTimeout=45')}

class DietDeploy():
    
    def __init__(self, params_diet):

        self.site = params_diet["site"]
        self.frontend = self.site + ".grid5000.fr"
        self.user_frontend_connection_params = {'user': '******',
                                                'default_frontend': self.site}
        
        self.nb_nodes = 0
                  
        self.MA = [] 
        self.LA = []
        self.servers = []
Code Example #4
File: load_injector.py Project: badock/vm5k
# Assumed imports (the snippet omits them)
from execo import logger, default_connection_params, TaktukRemote, TaktukPut

#
# optmodes = parser.add_mutually_exclusive_group()
# optmodes.add_argument('--prepare',
#                 dest="prepare",
#                 action="store_true",
#                 default=False,
#                 help="prepare the VMs for injection")
# optmodes.add_argument('--run',
#                 dest="run",
#                 action="store_true",
#                 default=False,
#                 help="Run the injector after generating events")
# optmodes.add_argument('--prepare-and-run',
#                       T)
 
logger.setLevel('INFO')

default_connection_params['user'] = '******'

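# Build a VM name -> IP mapping from the tab-separated vms.list file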
vms = {}
with open('vms.list') as f:
    for line in f:
        ip, vm = line.strip().split('\t')
        vms[vm] = ip
 
logger.info('Installing cpulimit on all VMs')
apt = TaktukRemote('apt-get install -y cpulimit', vms.values()).run()
 
logger.info('Copying memtouch on all vms')
copy_stress = TaktukPut(vms.values(), ['memtouch-with-busyloop3']).run()
Code Example #5
    def run(self):
        """Reserve best-effort hosts and run one parameter combination per host in worker threads."""
        if self.options.oargrid_job_id is not None:
            self.oar_job_id = self.options.oargrid_job_id
        else:
            self.oar_job_id = None

        self.list_of_clusters = [
            'parasilo', 'paravance', 'parapluie', 'paranoia'
        ]

        try:
            # Create the main iterator used by the first control loop.
            self.define_parameters()
            self.working_dir = '/data/jorouzaudcornabas_' + str(
                self.options.storage5k_job_id)

            job_is_dead = False
            # While there are combinations to treat
            while len(self.sweeper.get_remaining()) > 0:
                # If no job, we make a reservation and prepare the hosts for the experiments
                if self.oar_job_id is None:
                    self.submit_all_available_best_effort(
                        self.list_of_clusters, self.options.walltime)
                    # self.make_reservation_local()
                # Wait for the job to start
                logger.info('Waiting for job %s to start',
                            str(self.oar_job_id))
                wait_oar_job_start(self.oar_job_id)
                # Retrieving the hosts and subnets parameters
                self.hosts = get_oar_job_nodes(self.oar_job_id)
                # Hosts deployment and configuration
                default_connection_params['user'] = '******'

                logger.info("Start hosts configuration")
                ex_log.setLevel('INFO')
                #===============================================================
                # deployment = Deployment(hosts = self.hosts,
                #             env_file='/home/sirimie/env/mywheezy-x64-base.env')
                # self.hosts, _ = deploy(deployment)
                #===============================================================
                if len(self.hosts) == 0:
                    break

                # Initializing the resources and threads
                available_hosts = self.hosts

                threads = {}

                # Creating the unique folder for storing the results
                comb_dir = self.result_dir + '/logs'
                if not os.path.exists(comb_dir):
                    os.mkdir(comb_dir)

                logger.info("Starting the thread " + str(self.is_job_alive()) +
                            " " + str(len(threads.keys())))
                # Checking that the job is running and not in Error
                while self.is_job_alive() or len(threads.keys()) > 0:
                    job_is_dead = False

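                    # Reap finished worker threads until enough hosts are
                    # free for the next combination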
                    while self.options.n_nodes > len(available_hosts):
                        tmp_threads = dict(threads)
                        for t in tmp_threads:
                            if not t.is_alive():
                                available_hosts.append(tmp_threads[t]['host'])
                                del threads[t]
                        sleep(5)
                        if not self.is_job_alive():
                            job_is_dead = True
                            break
                    if job_is_dead:
                        break

                    # Getting the next combination
                    comb = self.sweeper.get_next()
                    if not comb:
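                        # No combination left: wait for running threads to
                        # finish, then leave the loop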
                        while len(threads.keys()) > 0:
                            tmp_threads = dict(threads)
                            for t in tmp_threads:
                                if not t.is_alive():
                                    del threads[t]
                            logger.info('Waiting for threads to complete')
                            sleep(20)
                        break

                    host = available_hosts[0]
                    available_hosts = available_hosts[1:]
                    logger.info("Launching thread")
                    t = Thread(target=self.workflow,
                               args=(comb, host, comb_dir))
                    threads[t] = {'host': host}
                    t.daemon = True
                    t.start()

                if not self.is_job_alive():
                    job_is_dead = True

                if job_is_dead:
                    self.oar_job_id = None

        finally:
            if self.oar_job_id is not None:
                if not self.options.keep_alive:
                    logger.info('Deleting job')
                    oardel([self.oar_job_id])
                else:
                    logger.info('Keeping job alive for debugging')
Code Example #6
    def run(self):
        """Reserve grid hosts, deploy the environment, and sweep parameter combinations in worker threads."""
        if self.options.oargrid_job_id:
            self.oargrid_job_id = self.options.oargrid_job_id
        else:
            self.oargrid_job_id = None

        try:
            # Create the main iterator used by the first control loop.
            self.define_parameters()

            job_is_dead = False
            # While there are combinations to treat
            while len(self.sweeper.get_remaining()) > 0:
                # If no job, we make a reservation and prepare the hosts for the experiments
                if self.oargrid_job_id is None:
                    self.make_reservation()
                # Wait for the job to start
                logger.info('Waiting for the job to start')
                wait_oargrid_job_start(self.oargrid_job_id)
                # Retrieving the hosts and subnets parameters
                self.hosts = get_oargrid_job_nodes(self.oargrid_job_id)
                # Hosts deployment and configuration

                default_connection_params['user'] = '******'

                logger.info("Start hosts configuration")
                ex_log.setLevel('INFO')
                deployment = Deployment(
                    hosts=self.hosts,
                    env_file='/home/sirimie/env/mywheezy-x64-base.env')
                self.hosts, _ = deploy(deployment)

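                # Remove result and platform files left over from a previous run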
                Remote("rm -f /home/Work/sgcbntier/paasage_demo/csv/REQTASK_*",
                       self.hosts).run()
                Remote(
                    "rm -f /home/Work/sgcbntier/paasage_demo/platform_aws.xml",
                    self.hosts).run()
                Remote("rm -f /home/Work/sgcbntier/paasage_demo/cloud_ec2.xml",
                       self.hosts).run()

                Put(self.hosts,
                    ["run_all_execo.py", "xml_gen_execo.py", "conf.xml",
                     "platform_aws.xml", "cloud_ec2.xml"],
                    remote_location="/home/Work/sgcbntier/paasage_demo/").run()
                logger.info("Done")

                if len(self.hosts) == 0:
                    break

                # Initializing the resources and threads
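                # One worker slot per hardware thread: each host is repeated
                # smt_size times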
                available_hosts = [
                    host for host in self.hosts for i in range(
                        get_host_attributes(host)['architecture']['smt_size'])
                ]

                threads = {}

                # Creating the unique folder for storing the results
                comb_dir = self.result_dir + '/csv_results'
                if not os.path.exists(comb_dir):
                    os.mkdir(comb_dir)

                # Checking that the job is running and not in Error
                while self.is_job_alive() or len(threads.keys()) > 0:
                    job_is_dead = False
                    while self.options.n_nodes > len(available_hosts):
                        tmp_threads = dict(threads)
                        for t in tmp_threads:
                            if not t.is_alive():
                                available_hosts.append(tmp_threads[t]['host'])
                                del threads[t]
                        sleep(5)
                        if not self.is_job_alive():
                            job_is_dead = True
                            break
                    if job_is_dead:
                        break

                    # Getting the next combination
                    comb = self.sweeper.get_next()
                    if not comb:
                        while len(threads.keys()) > 0:
                            tmp_threads = dict(threads)
                            for t in tmp_threads:
                                if not t.is_alive():
                                    del threads[t]
                            logger.info('Waiting for threads to complete')
                            sleep(20)
                        break

                    host = available_hosts[0]
                    available_hosts = available_hosts[1:]

                    t = Thread(target=self.workflow,
                               args=(comb, host, comb_dir))
                    threads[t] = {'host': host}
                    t.daemon = True
                    t.start()

                if not self.is_job_alive():
                    job_is_dead = True

                if job_is_dead:
                    self.oargrid_job_id = None

        finally:
            if self.oargrid_job_id is not None:
                if not self.options.keep_alive:
                    logger.info('Deleting job')
                    oargriddel([self.oargrid_job_id])
                else:
                    logger.info('Keeping job alive for debugging')
Code Example #7
import time
import os
import sys
from sys import argv
from pprint import pprint
from diet_deploy import DietDeploy, getNodesfromFile
from diet_utils import get_results, get_node_name, writeNodesToFile, file_len

import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages

from time import gmtime, strftime

# Assumed imports: logger and logging are used below but missing from the snippet
import logging
from execo import logger

logger.setLevel(logging.INFO)

if len(sys.argv) > 1:
    script, oargrid_job_id = argv
    oargrid_job_id = int(oargrid_job_id)
else:
    # -1 to deploy with a new reservation; > 0 to reuse an existing reservation
    oargrid_job_id = -1


ssh_key = "/tmp/oargrid/oargrid_ssh_key_dbalouek_"+str(oargrid_job_id)
env = "http://public.lyon.grid5000.fr/~dbalouek/envs/debian/wheezy-x64-diet.dsc"
walltime = '02:00:00'
n_nodes = 1
oargridsub_opts = '-t deploy -t destructive'
nodes_gr1 = "./nodes_gr1"
nodes_gr2 = "./nodes_gr2"
Code Example #8
File: funk.py Project: mimbert/Funk
opttime.add_option("-e", "--enddate", 
                dest = "enddate", 
                default = format_oar_date(int(time()+timedelta_to_seconds(timedelta(days = 3, minutes = 1)))),    
                help = "End date in OAR date format (%default)")

parser.add_option_group(opttime)
(options, args) = parser.parse_args()

# The first argument of the script is the program to launch after the oargridsub command
prog = args[0] if len(args) == 1 else None

if options.verbose:
    logger.setLevel(logging.DEBUG)
elif options.quiet:
    logger.setLevel(logging.WARN)
else:
    logger.setLevel(logging.INFO)

logger.debug('Options\n'+'\n'.join( [ set_style(option.ljust(20),'emph')+\
                    '= '+str(value).ljust(10) for option, value in vars(options).iteritems() if value is not None ]))

logger.info('%s', set_style('-- Find yoUr Nodes on g5K --', 'log_header'))
logger.info('From %s to %s', set_style(options.startdate, 'emph'), 
            set_style(options.enddate, 'emph'))
Code Example #9
File: get_state.py Project: sphilippot/vm5k
#!/usr/bin/env python
import json
from pprint import pformat
from xml.etree.ElementTree import Element, dump, SubElement, parse
from execo import logger, default_connection_params, sleep, TaktukPut, TaktukRemote
from execo_g5k import get_host_site, get_host_cluster, get_cluster_site
from vm5k import default_vm
from vm5k.utils import prettify

logger.setLevel('DETAIL')

default_connection_params['user'] = '******'

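# Fall back to the VM defaults for attributes missing from the parsed element
# (reads the module-level 'vm', assumed to be set elsewhere in the script)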
def _default_xml_value(key):
    return default_vm[key] if key not in vm.attrib else vm.get(key)

def get_load_color(load):
    """Create 10 steps of colors based on the load"""
    n = load // 10
    R = 255 * n / 10
    G = 255 * (10 - n) / 10
    B = 0
    return '#%02x%02x%02x' % (int(R), int(G), int(B))

logger.info('Reading initial topo')
tree = parse('final_topo.xml')
state = tree.getroot()

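# Keep only hosts whose state is OK, sorted by cluster name then node number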
hosts = sorted([host.get('id') for host in state.findall('.//host')
                if host.get('state') == 'OK'],
               key=lambda host: (host.split('.', 1)[0].split('-')[0],
                                 int(host.split('.', 1)[0].split('-')[1])))