Code Example #1
File: lambda_fn.py  Project: met-office-lab/dre-skill
def go(event, context, speech_config_name="default", cache=ForecastCache()):
    default_values = database.get_config(event["session"]["user"]["userId"])
    speech_config = database.get_speech_conf(speech_config_name)
    try:
        Session = conversation.mk_session_class(IntentRequestHandlers)
        session = Session(event, context, speech_config, default_values, cache=cache)
        return session.respond()
    except conversation.PrimarySlotError as e:
        return e.message
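In Example #1, the get_config lookup key is the Alexa userId pulled out of the incoming event, so go() expects a nested session.user.userId field. A minimal sketch of that shape, with an invented userId (the real event carries many more fields):

# Hypothetical Alexa-style event: only the field go() actually reads is shown.
event = {"session": {"user": {"userId": "amzn1.ask.account.EXAMPLE"}}}

# This is the key go() passes to database.get_config() to fetch per-user defaults.
user_id = event["session"]["user"]["userId"]
print(user_id)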
Code Example #2
File: pg_mb.py  Project: vtomasr5/pg_mb
def pg_mb(logger, action):
    """
    Loop over all clusters and databases.

    Action:
        0: Backup
        1: Restore
    TODO: action is not a boolean because more actions may be added in the
    future.
    """

    # Get all businesses.
    companies = database.get_business()
    logger.debug('Get all business')
    for b in companies['data']:
        business_id = b['id']
        business = b['name']
        logger.info('Process business: %s - %s' % (business_id, business))

        # Get backups directory for business configuration.
        backupdir = database.get_config(business_id, 'backupdir')
        backupdir = backupdir['data']['value'] + '/' + business
        logger.info('Backup root directory: %s' % backupdir)

        # Get all PostgreSQL clusters. Return QA domain. Does not change.
        clusters = database.get_clusters(business_id)
        logger.debug('Get all clusters')
        for c in clusters['data']:
            cluster_id = c['id']
            clustername = c['name']
            logger.info('Process cluster: %s - %s' % (cluster_id, clustername))

            # Get cluster databases.
            logger.debug('Get all databases')
            databases = database.get_databases(cluster_id, database.production)
            for database_id, dbname in databases['data']:
                logger.info(
                    'Process database: %s - %s' % (database_id, dbname)
                )

                # Determine scheduled backup.
                logger.debug('Get schedulers')
                schedulers = backup.get_scheduler()
                for scheduler in schedulers:
                    logger.info('%s backup' % (scheduler.capitalize()))
                    if action == 0:
                        # Run backup.
                        backup.dump(
                            logger,
                            scheduler,
                            clustername,
                            cluster_id,
                            dbname,
                            database_id,
                            backupdir
                        )
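Example #2 assumes get_config(business_id, 'backupdir') returns a nested dict with the configured path under data.value. A sketch of that assumed shape and of the same concatenation pg_mb() performs (paths and names are invented):

# Assumed response shape of database.get_config(business_id, 'backupdir'); values invented.
config = {'data': {'value': '/var/backups/postgres'}}
business = 'acme'

# Per-business backup root, built exactly as in pg_mb().
backupdir = config['data']['value'] + '/' + business
print(backupdir)  # /var/backups/postgres/acme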
Code Example #3
def go(event, context, speech_config_name="default", cache=ForecastCache()):
    default_values = database.get_config(event["session"]["user"]["userId"])
    speech_config = database.get_speech_conf(speech_config_name)
    try:
        Session = conversation.mk_session_class(IntentRequestHandlers)
        session = Session(event,
                          context,
                          speech_config,
                          default_values,
                          cache=cache)
        return session.respond()
    except conversation.PrimarySlotError as e:
        return e.message
Code Example #4
File: pg_mb.py  Project: vtomasr5/pg_mb
def pg_cb(logger, cluster, db):
    """
    Custom backup: One cluster and one database.
    """

    # Verify cluster and database.
    logger.debug('Send to verify: Cluster: %s; Database: %s' % (cluster, db))
    data = database.verify(cluster, db)
    logger.debug('Verify result: %s' % data)
    if 'data' in data or data.status_code < 400:

        # Get backup directory for business configuration.
        backupdir = database.get_config(data['data']['business']['id'], 'backupdir')
        backupdir = backupdir['data']['value'] + '/' + data['data']['business']['name']
        logger.info('Backup root directory: %s' % backupdir)

        # Dump in special dir.
        custom_dir = backup.manual
        logger.info(
            'Start manual backup: Database %s; Cluster: %s'
            % (db, cluster)
        )
        # Run backup.
        backup.dump(
            logger,
            custom_dir,
            cluster,
            data['data']['cluster']['id'],
            db,
            data['data']['database']['id'],
            backupdir
        )
        logger.info('Backup done')

    else:
        logger.error('Cluster or database does not exist.')
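Example #4 pulls the business, cluster and database ids out of the database.verify() result before dispatching the dump. A sketch of the nested shape that code assumes (all ids and names are invented for illustration):

# Assumed shape of database.verify(cluster, db); every value here is invented.
data = {
    'data': {
        'business': {'id': 1, 'name': 'acme'},
        'cluster': {'id': 7},
        'database': {'id': 42},
    }
}

# The fields pg_cb() forwards to backup.dump().
print(data['data']['business']['id'],
      data['data']['cluster']['id'],
      data['data']['database']['id'])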
Code Example #5
import pickle
import unittest

import os
import sys
import yaml
sys.path.append("..")

from reduced_dotmap import DotMap
from conversation import *
from dre.decision import Loc

from intent_processing.lambda_fn import *
from intent_processing.intent_request_handlers import IntentRequestHandlers

from database import database
speech_config = database.get_speech_conf("tests")
full_conf = database.get_config('tests')

Session = mk_session_class(IntentRequestHandlers)


class LambdaDecisionTest(unittest.TestCase):
    base = os.path.split(__file__)[0]
    with open(os.path.join(base, 'json_packets', 'in', 'sample_event.json'),
              'r') as evtfile:
        event = yaml.safe_load(evtfile.read())

    cache = ForecastCache()
    with open(os.path.join(base, 'data', 'testForecast.pkl'), "rb") as f:
        timesteps = pickle.load(f)
    cache.cache_forecast(timesteps, Loc(lat=50.7, lon=-3.5))
Code Example #6
import pickle
import unittest

import os
import sys
import yaml
sys.path.append("..")

from reduced_dotmap import DotMap
from conversation import *
from dre.decision import Loc

from intent_processing.lambda_fn import *
from intent_processing.intent_request_handlers import IntentRequestHandlers

from database import database
speech_config = database.get_speech_conf("tests")
full_conf = database.get_config('tests')

Session = mk_session_class(IntentRequestHandlers)


class LambdaDecisionTest(unittest.TestCase):
    base = os.path.split(__file__)[0]
    with open(os.path.join(base, 'json_packets', 'in', 'sample_event.json'), 'r') as evtfile:
        event = yaml.safe_load(evtfile.read())

    cache = ForecastCache()
    with open(os.path.join(base, 'data', 'testForecast.pkl'), "rb") as f:
        timesteps = pickle.load(f)
    cache.cache_forecast(timesteps, Loc(lat=50.7, lon=-3.5))

    def testLambda(self):
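Example #6 breaks off at the start of testLambda. A plausible continuation, consistent with the go() signature shown in Examples #1 and #3 (hypothetical; the assertion is not the project's actual test):

    def testLambda(self):
        # Hypothetical body: call the Lambda entry point with the fixture event,
        # the "tests" speech config and the pre-populated forecast cache.
        response = go(self.event, None, speech_config_name="tests", cache=self.cache)
        self.assertIsNotNone(response)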