level(LinuxNetDriver vs CiscoNetDriver). Managers will often provide methods for initial setup of a host or periodic tasks to a wrapping service. This module provides Manager, a base class for managers. """ import socket import eventlet from ops import utils from ops import log as logging from ops.options import get_options options = get_options() LOG = logging.getLogger(__name__) def periodic_task(*args, **kwargs): """Decorator to indicate that a method is a periodic task. This decorator can be used in two ways: 1. Without arguments '@periodic_task', this will be run on every tick of the periodic scheduler. 2. With arguments, @periodic_task(ticks_between_runs=N), this will be run on every N ticks of the periodic scheduler. """
service_opts = [ { "name": 'report_interval', "default": 30, "help": 'seconds between nodes reporting state to datastore', "type": int, }, { "name": 'periodic_interval', "default": 60, "help": 'seconds between running periodic tasks', "type": int, }, ] options = get_options(service_opts, 'services') class Launcher(object): """Launch one or more services and wait for them to complete.""" def __init__(self): """Initialize the service launcher. :returns: None """ self._services = [] @staticmethod def run_server(server): """Start and wait for a server to finish.
level(LinuxNetDriver vs CiscoNetDriver). Managers will often provide methods for initial setup of a host or periodic tasks to a wrapping service. This module provides Manager, a base class for managers. """ import socket import eventlet from ops import utils from ops import log as logging from ops.options import get_options options = get_options() LOG = logging.getLogger(__name__) def periodic_task(*args, **kwargs): """Decorator to indicate that a method is a periodic task. This decorator can be used in two ways: 1. Without arguments '@periodic_task', this will be run on every tick of the periodic scheduler. 2. With arguments, @periodic_task(ticks_between_runs=N), this will be run on every N ticks of the periodic scheduler.
from ops import cache from ops import utils from ops import log as logging from ops.service import db as service_db auth_opts = [ { "name": "policy", "default": "ops.api.auth.policy", "help": "", "type": str, }, ] options = get_options(auth_opts, 'auth') LOG = logging.getLogger() def load_policy(): option_split = options.policy.split(".") mod = option_split[0] fun = options.policy[options.policy.rfind('.')+1:] fn_, modpath, desc = imp.find_module(mod) fn_, path, desc = imp.find_module(fun, [os.path.join(modpath, "/".join(option_split[1:-1]))]) return imp.load_module(fun, fn_, path, desc) class BaseAuth(tornado.web.RequestHandler):
from ops import cache from ops import utils from ops import log as logging from ops.service import db as service_db auth_opts = [ { "name": "policy", "default": "ops.api.auth.policy", "help": "", "type": str, }, ] options = get_options(auth_opts, 'auth') LOG = logging.getLogger() def load_policy(): option_split = options.policy.split(".") mod = option_split[0] fun = options.policy[options.policy.rfind('.') + 1:] fn_, modpath, desc = imp.find_module(mod) fn_, path, desc = imp.find_module( fun, [os.path.join(modpath, "/".join(option_split[1:-1]))]) return imp.load_module(fun, fn_, path, desc) class BaseAuth(tornado.web.RequestHandler):
from sqlalchemy import Column, Integer, BigInteger, String, schema
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float

from ops import options
from ops.db.models import BASE
from ops.db.session import register_models

options = options.get_options()


class CostLog(BASE):
    """Billing record for a single instance.

    ``status`` lifecycle:

    * ``active``  -- the instance is in use and still accruing cost
    * ``deleted`` -- the instance was removed and the record is settled
    """

    __tablename__ = 'cost_log'

    id = Column(Integer, primary_key=True, autoincrement=True)
    tenant_id = Column(String(255))
    user_id = Column(String(255))
    # Accumulated cost and per-unit price, both zero until billed.
    cost = Column(Float, nullable=False, default=0)
    unit = Column(Float, nullable=False, default=0)
    status = Column(String(255), nullable=False, default='active')


register_models((CostLog, ))
from ops.options import get_options auth_opts = [{ "name": "cached_backend", "default": 'redis://127.0.0.1:6379/0', "help": 'cached backend uri', "type": str, }, { "name": 'cache_timeout', "default": '3600', "help": 'cache timeout seconds', "type": str, }] options = get_options(auth_opts, 'cache') class Backend(object): def __init__(self): cached_backend = options.cached_backend host, port_db = cached_backend.split('://')[1].split(':') port, db = port_db.split("/") self.conn = redis.StrictRedis(host=host, port=port, db=db) def get(self, id, default=None): """ Return object with id """ try: ret = self.conn.get(id)
}, { "name": 'log_date_format', "default": '%Y-%m-%d %H:%M:%S', "help": 'time format of log', "type": str, }, { "name": 'logfile_mode', "default": '0644', "help": 'Default file mode used when creating log files', "type": str, }, ] options = get_options(service_opts, 'services') # our new audit level # NOTE(jkoelker) Since we synthesized an audit level, make the logging # module aware of it so it acts like other levels. logging.AUDIT = logging.INFO + 1 logging.addLevelName(logging.AUDIT, 'AUDIT') try: NullHandler = logging.NullHandler except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 class NullHandler(logging.Handler): def handle(self, record): pass
{ "name": 'sql_max_retries', "default": 10, "help": 'maximum db connection retries during startup. ' '(setting -1 implies an infinite retry count)', "type": int, }, { "name": 'sql_retry_interval', "default": 10, "help": 'interval between retries of opening a sql connection', "type": int, }, ] options = get_options(db_options, 'db') _ENGINE = None _MAKER = None def get_session(autocommit=True, expire_on_commit=False): """Return a SQLAlchemy session.""" global _MAKER if _MAKER is None: engine = get_engine() _MAKER = get_maker(engine, autocommit, expire_on_commit) session = _MAKER()
# -*- coding: utf-8 -*- import smtplib from email.mime.text import MIMEText from ops.options import get_options smtp_options = [ { "name": "smtp_uri", "default": "smtp://*****:*****@smtp_server:smtp_port", "help": "smtp auth info", "type": str, }] options = get_options(smtp_options) def send_mail(to_list,sub,content,html=False): smtp = options.smtp_uri mail_postfix = "gamewave.net" mail_host = smtp.split("@")[-1].split(":")[0] mail_user = smtp.split(":")[1].split("/")[-1] mail_pass = smtp.split("@")[0].split(":")[-1] me="AlertCenter"+"<"+mail_user+"@"+mail_postfix+">" if html: msg = MIMEText(content,_subtype='html',_charset='utf-8') else: msg = MIMEText(content,_charset='utf-8') msg['Subject'] = sub msg['From'] = me msg['To'] = ";".join(to_list)
from sqlalchemy import Column, Integer, BigInteger, String, schema
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float

from ops import options
from ops.db.models import BASE
from ops.db.session import register_models

options = options.get_options()


class CostLog(BASE):
    """Per-instance billing record.

    ``status`` values:

    * ``active``  -- instance is in use; cost is still accruing
    * ``deleted`` -- instance is gone; the record has been settled
    """

    __tablename__ = 'cost_log'

    id = Column(Integer, primary_key=True, autoincrement=True)
    tenant_id = Column(String(255))
    user_id = Column(String(255))
    # Total cost and unit price; default to zero for new rows.
    cost = Column(Float, nullable=False, default=0)
    unit = Column(Float, nullable=False, default=0)
    status = Column(String(255), nullable=False, default='active')


register_models((CostLog,))
auth_opts = [ { "name": "cached_backend", "default": 'redis://127.0.0.1:6379/0', "help": 'cached backend uri', "type": str, }, { "name": 'cache_timeout', "default": '3600', "help": 'cache timeout seconds', "type": str, }] options = get_options(auth_opts, 'cache') class Backend(object): def __init__(self): cached_backend = options.cached_backend host, port_db = cached_backend.split('://')[1].split(':') port, db = port_db.split("/") self.conn = redis.StrictRedis(host=host, port=port, db=db) def get(self, id, default=None): """ Return object with id """ try: ret = self.conn.get(id) if ret: