コード例 #1
0
ファイル: config.py プロジェクト: alal3177/downscaling
 def __init__(self, afile):
     """Load downscaler settings from the DEFAULT section of *afile*."""
     self.afile = afile
     self.config = read_config(self.afile)
     defaults = self.config.defaults()
     # Only the interval is cast to int; threshold and policy_in_place stay as
     # raw config strings. NOTE(review): confirm threshold should not be numeric.
     self.threshold = defaults['threshold']
     self.downscaler_interval = int(defaults['downscaler_interval'])
     self.policy_in_place = defaults['policy_in_place']
コード例 #2
0
ファイル: config.py プロジェクト: alal3177/downscaling
 def __init__(self, file):
     """Read cloud/instance settings from the DEFAULT section of the config file."""
     self.file = file
     self.config = read_config(self.file)
     section = self.config.defaults()
     # All values are kept as the raw strings parsed from the config file.
     self.cloud = section['cloud']
     self.instance_type = section['instance_type']
     self.image_id = section['image_id']
     self.script_path = section['script_path']
コード例 #3
0
ファイル: config.py プロジェクト: alal3177/downscaling
 def __init__(self, file):
     """Load key/path settings from the DEFAULT section of the config file.

     :param file: path to the configuration file to parse.
     """
     self.file = file
     # Consistency fix: sibling config classes read via the stored attribute
     # (self.file); this one passed the bare parameter instead.
     self.config = read_config(self.file)
     default_dict = self.config.defaults()
     self.key_name = default_dict['key_name']
     self.pub_path = default_dict['pub_path']
     self.priv_path = default_dict['priv_path']
     # NOTE(review): kept as a string like the other defaults; cast at the
     # call site if a numeric limit is expected — confirm.
     self.initial_monitor_time_limit = default_dict['initial_monitor_time_limit']
コード例 #4
0
ファイル: config.py プロジェクト: alal3177/downscaling
 def __init__(self, file):
     """Read user/submit/log path settings from the DEFAULT config section."""
     self.file = file
     self.config = read_config(self.file)
     section = self.config.defaults()
     # Each attribute mirrors one DEFAULT-section option, kept as a raw string.
     self.user = section['user']
     self.submit_local = section['submit_local']
     self.directory = section['directory']
     self.submit_remote = section['submit_remote']
     self.log_remote = section['log_remote']
コード例 #5
0
ファイル: automaton.py プロジェクト: tvoran/automaton
def main():
    """Parse options, wire up signal handling, and run the Automaton thread."""
    options, _args = parse_options()
    configure_logging(options.debug)
    config = read_config(options.config_file)
    # Install a SIGINT handler so Ctrl-C triggers a clean shutdown.
    signal.signal(signal.SIGINT, clean_exit)
    automaton = Automaton(config)
    automaton.start()
    # Wake every second to make sure signals are handled by the main thread;
    # needed due to a quirk in the way Python threading delivers signals.
    # is_alive() replaces the deprecated isAlive() alias (removed in Py 3.9,
    # available since 2.6, so this stays backward-compatible).
    while automaton.is_alive():
        automaton.join(timeout=1.0)
コード例 #6
0
ファイル: automaton.py プロジェクト: tvoran/automaton
def main():
    """Parse options, wire up signal handling, and run the Automaton thread."""
    options, _args = parse_options()
    configure_logging(options.debug)
    config = read_config(options.config_file)
    # Install a SIGINT handler so Ctrl-C triggers a clean shutdown.
    signal.signal(signal.SIGINT, clean_exit)
    automaton = Automaton(config)
    automaton.start()
    # Wake every second to make sure signals are handled by the main thread;
    # needed due to a quirk in the way Python threading delivers signals.
    # is_alive() replaces the deprecated isAlive() alias (removed in Py 3.9,
    # available since 2.6, so this stays backward-compatible).
    while automaton.is_alive():
        automaton.join(timeout=1.0)
コード例 #7
0
ファイル: config.py プロジェクト: alal3177/downscaling
    def __init__(self, file):
        """Pull endpoint and credential settings from the DEFAULT config section."""
        self.file = file
        self.config = read_config(self.file)
        section = self.config.defaults()
        self.url = section['url']
        self.port = section['port']
        self.access_id = section['access_id']
        self.secret_key = section['secret_key']
        self.launch_name = section['launch_name']

        # Only the prefix comes from the config file; the full domain name is
        # assigned later from the prefix plus a timestamp.
        self.domain_name_prefix = section['domain_name']
        self.domain_name = None
コード例 #8
0
ファイル: phorque.py プロジェクト: cu-csc/phorque
def main():
    """Parse options, start the Phorque daemon thread, and wait on it."""
    options, _args = parse_options()
    configure_logging(options.debug)
    config = read_config(options.config_file)
    # Install a SIGINT handler so Ctrl-C triggers a clean shutdown.
    signal.signal(signal.SIGINT, clean_exit)
    phorque = Phorque(config)
    LOG.info("Starting Phorque thread")
    # Daemonize so the interpreter can exit even if the thread is wedged.
    phorque.daemon = True
    phorque.start()
    # Wake every second to make sure signals can be handled by the main thread.
    # is_alive() replaces the deprecated isAlive() alias (removed in Py 3.9,
    # available since 2.6, so this stays backward-compatible).
    while phorque.is_alive():
        phorque.join(timeout=1.0)
コード例 #9
0
ファイル: config.py プロジェクト: alal3177/downscaling
    def __init__(self, file):
        """Build one settings dict per cloud section found in the config file.

        Each entry of ``self.worker_groups`` maps option names to values for
        one section, plus a ``'cloud'`` key holding the section name.

        :param file: path to the configuration file listing worker groups.
        """
        self.file = file
        self.config = read_config(self.file)
        cloud_names = self.config.sections()

        self.worker_groups = list()
        for cloud in cloud_names:
            # Seed with the section name, then merge the (key, value) pairs.
            # Renamed from 'dict' to avoid shadowing the builtin; update()
            # preserves the original override order (section options win).
            group = {'cloud': cloud}
            group.update(self.config.items(cloud))
            self.worker_groups.append(group)
コード例 #10
0
ファイル: config.py プロジェクト: alal3177/downscaling
 def __init__(self, file):
     """Parse the config file and keep the list of its section names."""
     config = read_config(file)
     self.file = file
     self.config = config
     # Attribute name 'list' mirrors the builtin but is kept for compatibility.
     self.list = config.sections()
コード例 #11
0
ファイル: clouds.py プロジェクト: alal3177/downscaling
 def get_desired_data(self):
     """Return {'hotel': n, 'sierra': n} with each cloud's integer 'desired' count."""
     config_data = read_config(self.config_file)
     # Read the 'desired' option from each cloud's section as an int.
     return {
         "hotel": config_data.getint("hotel", "desired"),
         "sierra": config_data.getint("sierra", "desired"),
     }
コード例 #12
0
ファイル: clouds_and_condor.py プロジェクト: dmdu/downscaling
 def __init__(self, input_dir):
     """Load worker config and parsed Condor job data from *input_dir*."""
     self.input_dir = input_dir
     workers_conf = os.path.join(self.input_dir, "workers.conf")
     self.config_data = read_config(workers_conf)
     # Parse the Condor sleep log and keep the resulting jobs database.
     sleep_log = os.path.join(self.input_dir, "sleep.log")
     self.cpobj = log_parser.CondorParser(sleep_log)
     self.cpobj.parse_file()
     self.condor_data = self.cpobj.condor_jobs_db
コード例 #13
0
ファイル: test_post.py プロジェクト: cluo/learingPython
import os
import sys
import web
import json
import time
import hashlib
from lib import payload
from lib.util import MysqlHandler, read_config
from lib.logger import Logger

# Module-level setup: a logger writing to logs/<this script>.log, a shared
# MySQL handler, and options loaded from the default server config.
log = Logger(config_file='conf/server.conf',
             logfile=os.path.join('logs', os.path.basename(os.path.realpath(sys.argv[0])).replace(".py", '.log'))
)

# NOTE(review): both objects are created at import time — side effects on import.
mysql_handler = MysqlHandler()
opts = read_config()


def test(data):
    """Send *data* to the job service over a SREQ socket and return the reply.

    Returns None when the send raises (e.g. times out); the error is printed.
    """
    # Target address comes from the job_ip/job_port options in the config.
    sreq = payload.SREQ("tcp://%(job_ip)s:%(job_port)s" % opts)
    try:
        # Fix: the reply was computed and silently discarded; return it.
        return sreq.send(data, timeout=2)
    except Exception as e:
        # 'except ... as e' works on Python 2.6+ and 3 (old comma form was
        # Python-2-only); print(e) is likewise valid in both.
        print(e)


if __name__ == '__main__':
    # data = {
    #     "job_id": "201408271734511409132091",
    #     "job_type": 0,
    #     "release_type": "jetty",
コード例 #14
0
ファイル: config.py プロジェクト: dmdu/automaton
 def __init__(self, options):
     """Build the global, clouds, and benchmarking sub-configs.

     Each sub-config wraps the parsed contents of the file named by the
     corresponding attribute on *options*.
     """
     self.options = options
     self.globals = GlobalConfig(
         read_config(options.global_file))
     self.clouds = CloudsConfig(
         read_config(options.clouds_file))
     self.benchmarking = BenchmarkingConfig(
         read_config(options.benchmarking_file))
コード例 #15
0
ファイル: engine.py プロジェクト: dmurtari/automaton
of staged deployment.  We will implement the execution workflow here
when we have more time.

"""

import shutil

from lib import util
from deployment import common
from deployment.executor import Executor

# config file path
config_file = "../etc/global.conf"

# clone the repo locally
# NOTE(review): read_config is re-run for every value pulled below, re-parsing
# the file each time; could be hoisted into one parsed object — confirm cheap.
git_repo_location = util.read_config(config_file).get("DEFAULT",
                                                      "git_repo_location")
my_local_folder = util.clone_git_repo(git_repo_location)

# we fill a dict with our stages
git_repo_home = util.read_config(config_file).get("DEFAULT", "git_repo_home")
stages = common.get_stages("client", my_local_folder, git_repo_home)

# remove the directory since it is not needed anymore
shutil.rmtree(my_local_folder, ignore_errors=True)

# clone the repo to the vm
ssh_priv_key = util.read_config(config_file).get("DEFAULT", "ssh_priv_key")
cmd = "git clone %s %s" % (git_repo_location, git_repo_home)
# HACK: the target VM hostname is hard-coded; consider moving it to the config.
remote_clone_result = util.RemoteCommand("vm-148-120.uc.futuregrid.org",
                                         ssh_priv_key, cmd).execute()
コード例 #16
0
 def __init__(self, options):
     """Build the global, clouds, and benchmarking sub-configs from option paths."""
     self.options = options
     # Each sub-config wraps its own freshly parsed config file.
     self.globals = GlobalConfig(read_config(options.global_file))
     self.clouds = CloudsConfig(read_config(options.clouds_file))
     self.benchmarking = BenchmarkingConfig(read_config(options.benchmarking_file))
コード例 #17
0
ファイル: engine.py プロジェクト: alal3177/automaton
"""
This entire file is just an example to demonstrate functionality of staged deployment.
We will implement the execution workflow here when we have more time.
"""

import shutil

from lib import util
from deployment import common
from deployment.executor import Executor

# config file path
config_file = "../etc/global.conf"

# clone the repo locally
# NOTE(review): every value below triggers a fresh util.read_config parse of
# the same file; could be hoisted into one parsed object — confirm cheap.
my_local_folder = util.clone_git_repo(util.read_config(config_file).get("DEFAULT","git_repo_location"))

# we fill a dict with our stages
stages = common.get_stages("client", my_local_folder, util.read_config(config_file).get("DEFAULT","git_repo_home"))


# remove the directory since it is not needed anymore
shutil.rmtree(my_local_folder,ignore_errors=True)

# clone the repo to the vm
# HACK: the target VM hostname is hard-coded; consider moving it to the config.
remote_clone_result = util.RemoteCommand("vm-148-120.uc.futuregrid.org",\
    util.read_config(config_file).get("DEFAULT", "ssh_priv_key"),
    "git clone %s %s" % (util.read_config(config_file).get("DEFAULT","git_repo_location") ,
                         util.read_config(config_file).get("DEFAULT","git_repo_home"))).execute()

# initiate executor class with the stages
コード例 #18
0
This entire file is just an example to demonstrate functionality of staged deployment.
We will implement the execution workflow here when we have more time.
"""

import shutil

from lib import util
from deployment import common
from deployment.executor import Executor

# config file path
config_file = "../etc/global.conf"

# clone the repo locally
# NOTE(review): every value below triggers a fresh util.read_config parse of
# the same file; could be hoisted into one parsed object — confirm cheap.
my_local_folder = util.clone_git_repo(
    util.read_config(config_file).get("DEFAULT", "git_repo_location"))

# we fill a dict with our stages
stages = common.get_stages(
    "client", my_local_folder,
    util.read_config(config_file).get("DEFAULT", "git_repo_home"))

# remove the directory since it is not needed anymore
shutil.rmtree(my_local_folder, ignore_errors=True)

# clone the repo to the vm
# HACK: the target VM hostname is hard-coded; consider moving it to the config.
remote_clone_result = util.RemoteCommand("vm-148-120.uc.futuregrid.org",\
    util.read_config(config_file).get("DEFAULT", "ssh_priv_key"),
    "git clone %s %s" % (util.read_config(config_file).get("DEFAULT","git_repo_location") ,
                         util.read_config(config_file).get("DEFAULT","git_repo_home"))).execute()