Code example #1
def scale_microservice(service_name, value):

    # Read the current config file
    micro_config = util.read_config_file(micro_config_name)

    print("### Scaling micro_service: " + service_name + " of value: " +
          str(value))

    current_replica = plan.get_micro_replicas(service_name)
    max_replica = int(micro_config.get(service_name, 'max_replica'))
    min_replica = int(micro_config.get(service_name, 'min_replica'))

    # This represents the total number of services 'after' it has been scaled
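    # 'value' is signed: positive to scale up, negative to scale down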
    total_replica = current_replica + value

    if total_replica > max_replica:
        print('### Abort micro scaling for microservice: ' + service_name +
              ' due to max replica limit: ' + str(max_replica) + '.\n')
        return

    elif total_replica < min_replica:
        print('### Abort micro scaling for microservice: ' + service_name +
              ' due to min replica limit: ' + str(min_replica) + '.\n')
        return

    else:
        print("====> Scaling microservice: " + service_name + " to: " +
              str(total_replica) + "\n")
        result = util.run_command("sudo docker service scale " + service_name +
                                  "=" + str(total_replica))
        return
Code example #2
def db_connect():
    '''
        Create and return a SQLAlchemy engine for the database,
        using the URL from the predefined config file settings.
    '''
    db_url = read_config_file()
    return create_engine(db_url['SQLALCHEMY_DATABASE_URI'])
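
A minimal usage sketch (assuming read_config_file() returns a mapping that
contains a SQLALCHEMY_DATABASE_URI entry, as the code above implies):

engine = db_connect()
with engine.connect() as connection:
    pass  # issue queries against the configured database here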
Code example #3
def main():

    while True:
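        # Both config files are re-read on every iteration, so edits take
        # effect without restarting the monitor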

        micro_config = util.read_config_file(micro_config_file_name)
        macro_config = util.read_config_file(macro_config_file_name)

        micro_util = monitor.get_microservices_utilization()
        #micro_util = monitor.get_container_utilization()
        macro_util = monitor.get_macroservices_utilization()

        compare_cpu_util(micro_config, micro_util, macro_config, macro_util)

        print("****** Completed monitoring! Wait for " +
              str(monitoring_interval) + " seconds. *****")

        util.progress_bar(monitoring_interval)
Code example #4
def create_vm(new_vm_name, user_name, password, label, value):

    # Read the config file and get the Elascale directory
    config = util.read_config_file(config_name)

    elascale_dir = config.get('swarm', 'elascale_root_dir')

    # Path to the provision_vm.sh bash script in the Elascale root directory
    prov_vm_file = elascale_dir + "/provision_vm.sh"

    command = ("sudo " + prov_vm_file + " " + new_vm_name + " " + user_name +
               " " + password + " " + label + " " + value)
    result = util.provision_vm(command)
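
A hypothetical call for illustration (argument values are made up; 'label' and
'value' are presumably passed through to provision_vm.sh as a node label pair):

create_vm("worker-node-2", "ubuntu", "secret", "role", "worker")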
Code example #5
def main() -> None:

    # pylint: disable=no-member

    config = util.read_config_file("quality-control")
    parser = load_args_parser(config)
    args = parser.parse_args(sys.argv[1:])

    if not args.check_working and not args.check_daily and not args.check_history:
        logger.info("  [default to all sources]")
        args.check_working = True
        args.check_daily = True
        args.check_history = True

    config = QCConfig(
        results_dir=args.results_dir,
        save_results=args.save_results,
        enable_experimental=args.enable_experimental,
        images_dir=args.images_dir,
        plot_models=args.plot_models,
    )
    if config.save_results:
        logger.warning(f"  [save results to {args.results_dir}]")
    if config.plot_models:
        logger.warning(f"  [save forecast curves to {args.images_dir}]")

    if len(args.state) != 0:
        logger.error("  [states filter not implemented]")

    ds = DataSource()

    if args.check_working:
        logger.info(
            "--| QUALITY CONTROL --- GOOGLE WORKING SHEET |---------------------------------------------------"
        )
        log = check_working(ds, config=config)
        log.print()

    if args.check_daily:
        logger.info(
            "--| QUALITY CONTROL --- CURRENT |-----------------------------------------------------------"
        )
        log = check_current(ds, config=config)
        log.print()

    if args.check_history:
        logger.info(
            "--| QUALITY CONTROL --- HISTORY |-----------------------------------------------------------"
        )
        log = check_history(ds)
        log.print()
Code example #6
    def reset(self):
        self._working = None
        self._current = None
        self._history = None

        config = util.read_config_file("quality-control")
        self.config = QCConfig(
            results_dir=config["CHECKS"]["results_dir"],
            enable_experimental=config["CHECKS"]["enable_experimental"] == "True",
            save_results=config["CHECKS"]["save_results"] == "True",
            images_dir=config["MODEL"]["images_dir"],
            plot_models=config["MODEL"]["plot_models"] == "True",
        )

        self.ds = DataSource()
Code example #7
File: dbgraphs.py  Project: majunyang/PythonDBAGraphs
def ashcpu():
    """
    Generates a graph that shows cpu usage within an Oracle database
    by various parts of the application. The graph is configured by lines in 
    the text file ashcpufile.txt.
    
    Each line is part of a client machine name and a label
    for machines with that pattern.
    
    For example:
    
    abcd WEBFARM
    
    Any client machines with the string abcd in their name 
    will have their database cpu usage grouped into the category WEBFARM.
    
    The graph is hard coded to only look at CPU usage between 8 am and 5 pm
    of the day of the week specified. This is intended to look at CPU usage
    during the work day.
    
    """

    day = input('Enter day of week: ')

    user = util.my_oracle_user

    c = perfq.cpubymachine(day, 8, 17)

    lines = util.read_config_file(util.config_dir, database + util.ashcpu_file)
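    # Each config line pairs a client machine name pattern with a label,
    # e.g. "abcd WEBFARM" (see the docstring above)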

    for l in lines:
        args = l.split()
        if len(args) == 2:
            c.add_machine(args[0], args[1])

    querytext = c.build_query()

    user = util.my_oracle_user
    password = util.get_oracle_password(database)
    d = db.connection(user, password, database)
    h = saveawr.day_history(d, day, 'ASHCPUBYMACHINE', querytext)

    results = h.save_day_results()

    column_names = h.get_column_names()

    myplot.plot_cpu_by_day(database, day, results, column_names)
Code example #8
File: dbgraphs.py  Project: majunyang/PythonDBAGraphs
def groupsigs():
    """
    This shows the average elapsed time and total number of executions for 
    a group of SQL statements defined by their force matching signature.
    A signature represents a group of queries that are the same except for their
    constants. The goal of this query is to pick some group of queries 
    that we care about such as the main queries the users use every day and
    show their performance over time. It does hide the details of the individual
    queries but may have value if we choose the best set of signatures.   
    """

    user = util.my_oracle_user

    queryobj = perfq.groupofsignatures()

    lines = util.read_config_file(util.config_dir,
                                  database + util.groupsigs_file)
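    # Each non-empty line of the config file holds one force matching
    # signature as an integer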

    for line in lines:
        if len(line) > 0:
            queryobj.add_signature(int(line))

    querytext = queryobj.build_query()

    user = util.my_oracle_user
    password = util.get_oracle_password(database)
    dbconn = db.connection(user, password, database)

    results = dbconn.run_return_flipped_results(querytext)

    if results is None:
        print("No results returned")
        return

    # plot query

    title = "SQL matching group of signatures on " + database + " database elapsed versus executions"
    top_label = "Number of executions"
    bottom_label = "Averaged Elapsed Microseconds"

    date_time = results[0]
    executions = results[1]
    avg_elapsed = results[2]

    myplot.frequency_average(title, top_label, bottom_label, date_time,
                             executions, avg_elapsed)
Code example #9
def prepare_for_beats(vm_name):

    # Read the current config file
    config = util.read_config_file(config_name)

    print('\nDeploying beats on the VM: ' + vm_name)

    # Get the directory holding the config files (listed in config.ini)
    elascale_config_dir = config.get('swarm', 'elascale_config_dir')

    # Get the directory holding the certificates (listed in config.ini)
    elascale_certs_dir = config.get('swarm', 'elascale_certs_dir')

    # Copy the metricbeat.yml on swarm-master to the new node
    result = util.run_command("sudo docker-machine scp " +
                              elascale_config_dir + "metricbeat.yml " +
                              vm_name + ":~")

    # Change the hostname on the new metricbeat.yml
    sed_command = "sed -i \"s/name: \".*\"/name: \"" + vm_name + "\"/g\" ~/metricbeat.yml"
    result = util.run_command("sudo docker-machine ssh " + vm_name + " " +
                              sed_command)

    # Copy the dockbeat.yml on swarm-master to the new node
    result = util.run_command("sudo docker-machine scp " +
                              elascale_config_dir + "dockbeat.yml " + vm_name +
                              ":~")

    # Copy elasticsearch_certificate.yml to the new machine
    result = util.run_command("sudo docker-machine scp " + elascale_certs_dir +
                              "elasticsearch_certificate.pem " + vm_name +
                              ":~/certs")

    # Create /volumes/dockbeat-logs dir on the new node (required for dockbeat to work properly)
    result = util.run_command("sudo docker-machine ssh " + vm_name +
                              " sudo mkdir -p /volumes/dockbeat-logs/")

    print(
        'dockbeat and metricbeat yml files have been copied/configured successfully.'
    )
Code example #10
def scale_macroservice(host_name, value):

    # Read the current config file
    macro_config = util.read_config_file(macro_config_name)

    print("### Scaling macro_service: " + host_name + " of value: " +
          str(value))

    base_name = plan.get_macro_base_name(host_name)

    current_replica = plan.get_macro_replicas(base_name)

    max_replica = int(macro_config.get(base_name, 'max_replica'))
    min_replica = int(macro_config.get(base_name, 'min_replica'))

    # This represents the total number of services 'after' it has been scaled
    # 'value' variable tells whether to scale down ( - value) or scale up ( + value)
    total_replica = current_replica + value

    if total_replica > max_replica:
        print('### Abort macro scaling for ' + host_name +
              ' due to max replica limit: ' + str(max_replica) + '.\n')
        return

    elif total_replica < min_replica:
        print('### Abort macro scaling for ' + host_name +
              ' due to min replica limit: ' + str(min_replica) + '.\n')
        return

    else:
        if value > 0:
            print("====> Scaling out the macroservice: " + host_name +
                  " by 1\n")
            docker_machine_scale(host_name, 'scale-out')  # scale-out
        else:
            # value < 0
            print("====> Scaling in the macroservice: " + host_name +
                  " by -1\n")
            docker_machine_scale(host_name, 'scale-in')  # scale-in
Code example #11
File: dbgraphs.py  Project: majunyang/PythonDBAGraphs
def sigselapctcpu():
    """
    Plots elapsed for a group of sql statements based
    on their signatures against percent CPU of the host. 
    """

    user = util.my_oracle_user

    queryobj = perfq.groupofsignatures()

    lines = util.read_config_file(util.config_dir,
                                  database + util.groupsigs_file)

    for line in lines:
        if len(line) > 0:
            queryobj.add_signature(int(line))

    querytext = queryobj.build_query3()

    user = util.my_oracle_user
    password = util.get_oracle_password(database)
    dbconn = db.connection(user, password, database)

    results = dbconn.run_return_flipped_results(querytext)

    if results is None:
        print("No results returned")
        return

    # plot query

    title = "SQL matching group of signatures on " + database + " database elapsed versus cpu"
    y_label = "Minutes versus Percentage"

    number_of_plots = 2

    plot_names = ["CPU % Busy", "Elapsed in Minutes"]

    myplot.plotmulti(title, y_label, number_of_plots, plot_names, results)
Code example #12
File: dbgraphs.py  Project: majunyang/PythonDBAGraphs
def sigscpuio():
    """
    Plots elapsed, cpu, and io for a group of sql statements based
    on their signatures.  
    """

    user = util.my_oracle_user

    queryobj = perfq.groupofsignatures()

    lines = util.read_config_file(util.config_dir,
                                  database + util.groupsigs_file)

    for line in lines:
        if len(line) > 0:
            queryobj.add_signature(int(line))

    querytext = queryobj.build_query2()

    user = util.my_oracle_user
    password = util.get_oracle_password(database)
    dbconn = db.connection(user, password, database)

    results = dbconn.run_return_flipped_results(querytext)

    if results is None:
        print("No results returned")
        return

    # plot query

    title = "SQL matching group of signatures on " + database + " database elapsed CPU IO"
    y_label = "Seconds"

    number_of_plots = 3

    plot_names = ["Elapsed", "CPU+IO", "IO"]

    myplot.plotmulti(title, y_label, number_of_plots, plot_names, results)
Code example #13
File: dbgraphs.py  Project: majunyang/PythonDBAGraphs
def sigfour():
    """
    Plots executions, average elapsed, cpu percent
    and average single block IO time. 
    """

    user = util.my_oracle_user

    queryobj = perfq.groupofsignatures()

    lines = util.read_config_file(util.config_dir,
                                  database + util.groupsigs_file)

    for line in lines:
        if len(line) > 0:
            queryobj.add_signature(int(line))

    querytext = queryobj.build_query4()

    user = util.my_oracle_user
    password = util.get_oracle_password(database)
    dbconn = db.connection(user, password, database)

    results = dbconn.run_return_flipped_results(querytext)

    if results is None:
        print("No results returned")
        return

    # plot query

    title = "SQL matching group of signatures on " + database + " database four graphs"

    myplot.plot_four(title, "CPU % Busy", "Number of executions (/100000)",
                     "Average Elapsed Time (microseconds)",
                     "Average single block read time (ms)", results[0],
                     results[1], results[2], results[3], results[4])
Code example #14
File: db.py  Project: xdav/platypus
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.exc import IntegrityError
from sqlalchemy_utils import database_exists
from datetime import datetime
from util import generate_salt, generate_password_hash, read_config_file


class DuplicateLoginException(Exception):

    def __init__(self, message):
        super(DuplicateLoginException, self).__init__(message)

_DATABASE = 'sqlite:///platypus.db'
_DEV_DATABASE = 'sqlite:///platypus-dev.db'
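# Use the dev database whenever the config file has a truthy 'dev' entry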
database = _DEV_DATABASE if read_config_file('dev') else _DATABASE

setup = database_exists(database)
engine = create_engine(database, echo=True)
session = scoped_session(sessionmaker(
    autocommit=False,
    autoflush=False,
    bind=engine))
Base = declarative_base()
Base.query = session.query_property()
from model import Article, User, Role


def init():
    Base.metadata.create_all(bind=engine)
    if not setup:
Code example #15
    if calendar_type == "Next" and today >= prev_date and today < curr_date:
        return True
    return False


def get_random_emoji():
    all_emoji = sh.get_emoji()["emoji"]
    return random.choice(list(all_emoji.keys()))


if __name__ == "__main__":
    testing_slack_channel = None
    if len(sys.argv) > 1:
        testing_slack_channel = sys.argv[1]

    config_data = read_config_file("config.env")

    CREDENTIALS = get_conf_or_env("CREDENTIALS", config_data)
    CREDENTIALS_FILE = get_conf_or_env("CREDENTIALS_FILE", config_data,
                                       "credentials.json")
    WORKBOOK = get_conf_or_env("WORKBOOK", config_data)
    WORKSHEET_META_TAB = get_conf_or_env("WORKSHEET_META_TAB", config_data)
    SLACK_TOKEN = get_conf_or_env("SLACK_TOKEN", config_data)
    SLACK_USERNAME = get_conf_or_env("SLACK_USERNAME", config_data)
    SLACK_ICON_URL = get_conf_or_env("SLACK_ICON_URL", config_data)
    WORKSHEET_PEOPLE_TAB = get_conf_or_env("WORKSHEET_PEOPLE_TAB", config_data)

    required_variables = (
        "WORKBOOK WORKSHEET_META_TAB SLACK_TOKEN SLACK_USERNAME SLACK_ICON_URL"
        .split(" "))
Code example #16
    if calendar_type == 'Current' and today >= curr_date and today < next_date:
        return True
    elif calendar_type == 'Next' and today >= prev_date and today < curr_date:
        return True
    return False

def get_random_emoji():
    all_emoji = sh.get_emoji()['emoji']
    return random.choice(list(all_emoji.keys()))

if __name__ == '__main__':
    testing_slack_channel = None
    if len(sys.argv) > 1:
        testing_slack_channel = sys.argv[1]

    config_data = read_config_file('config.env')

    CREDENTIALS_FILE = get_conf_or_env('CREDENTIALS_FILE', config_data, 'credentials.json')
    WORKBOOK = get_conf_or_env('WORKBOOK', config_data)
    WORKSHEET_META_TAB = get_conf_or_env('WORKSHEET_META_TAB', config_data)
    SLACK_TOKEN = get_conf_or_env('SLACK_TOKEN', config_data)
    SLACK_USERNAME = get_conf_or_env('SLACK_USERNAME', config_data)
    SLACK_ICON_URL = get_conf_or_env('SLACK_ICON_URL', config_data)
    WORKSHEET_PEOPLE_TAB = get_conf_or_env('WORKSHEET_PEOPLE_TAB', config_data)

    required_variables = 'CREDENTIALS_FILE WORKBOOK WORKSHEET_META_TAB SLACK_TOKEN SLACK_USERNAME SLACK_ICON_URL'.split(' ')

    for variable in required_variables:
        if eval(variable) is None:
            logger.error('Missing ' + variable)
            exit(1)
Code example #17
File: run.py  Project: jBlinden/private-codis
    values = parser.parse_args()

    if values.N is not None:
        N = values.N
    if values.config is not None:
        config = values.config
    if values.indb is not None:
        indb = values.indb
    if values.port is not None:
        port = values.port
    if values.local is not None:
        local = values.local

    print("Collecting protocol parameters")
    C, Bits = util.read_config_file(config)

    print("Generating test case")
    util.GenerateTestCase(N, C, Bits, indb, config)

    if local == 1:
        print("Running local test")
        client = Popen([
            "./tests/ProtocolClient", "./data/info.txt", "./data/client/",
            "127.0.0.1",
            str(port)
        ])
        server = Popen([
            "./tests/ProtocolServer", "./data/info.txt", "./data/server/",
            str(port)
        ],
Code example #18
        return self.db_connection.run_return_all_results(query_string)

    def get_column_names(self):
        return self.db_connection.get_column_names()


# Main program starts here

database, dbconnection = util.script_startup('Database CPU by Application Area')

day = util.input_no_default('Enter day of week: ')

user = util.my_oracle_user

c = cpubymachine(day, 8, 17)

lines = util.read_config_file(util.config_dir, database + util.ashcpu_file)

for l in lines:
    args = l.split()
    if len(args) == 2:
        c.add_machine(args[0], args[1])

querytext = c.build_query()

h = day_history(dbconnection, day, 'ASHCPUBYMACHINE', querytext)

results = h.save_day_results()

column_names = h.get_column_names()

# Load global variables for graph
Code example #19
import util
import signatures
import myplot

database, dbconnection = util.script_startup('Stats for SQL statements by signature')

start_time = util.input_with_default('Start date and time (DD-MON-YYYY HH24:MI:SS)', '01-JAN-1900 12:00:00')

end_time = util.input_with_default('End date and time (DD-MON-YYYY HH24:MI:SS)', '01-JAN-2200 12:00:00')

instance_number = util.input_with_default('Database Instance (1 if not RAC)', '1')

queryobj = signatures.groupofsignatures()

queryobj.set_start_end_instance(start_time, end_time, instance_number)
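# Restrict the query to the chosen time window and database instance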

lines = util.read_config_file(util.config_dir, database + util.groupsigs_file)

for line in lines:
    if len(line) > 0:
        queryobj.add_signature(int(line))

querytext = queryobj.build_query()

results = dbconnection.run_return_flipped_results(querytext)

util.exit_no_results(results)

# plot query
    
myplot.title = "SQL matching group of signatures on "+database+" database, instance "+instance_number+" elapsed versus executions"
myplot.ylabel1 = "Number of executions"
Code example #20
File: ashcpu.py  Project: yaohongdi/PythonDBAGraphs
    def get_column_names(self):
        return self.db_connection.get_column_names()


# Main program starts here

database, dbconnection = util.script_startup(
    'Database CPU by Application Area')

day = util.input_no_default('Enter day of week: ')

user = util.my_oracle_user

c = cpubymachine(day, 8, 17)

lines = util.read_config_file(util.config_dir, database + util.ashcpu_file)

for l in lines:
    args = l.split()
    if len(args) == 2:
        c.add_machine(args[0], args[1])

querytext = c.build_query()

h = day_history(dbconnection, day, 'ASHCPUBYMACHINE', querytext)

results = h.save_day_results()

column_names = h.get_column_names()

# Load global variables for graph