Example #1
import argparse
import csv
import importlib

import prettytable

from dao.common import config


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--csv', default='')
    parser.add_argument('--default', action='store_true', default=False)
    parser.add_argument('--app-name', required=True)
    args = parser.parse_args()
    application = args.app_name
    #---------------
    config.setup(application)
    from dao.common import log
    log.setup('common')
    #---------------
    root = importlib.import_module(application)
    import_all(root.__file__)  # helper not shown in this fragment; a plausible sketch follows the example
    if args.default:
        header = ['section', 'name', 'default', 'help']
    else:
        header = ['section', 'name', 'value', 'help']
    opts = set()
    conf = config.get_config()
    for opt in config.CONFIG.get_options():
        value = opt.default if args.default \
            else conf[opt.section][opt.name]
        opts.add((opt.section, opt.name, value, opt.help))
    opts = sorted(opts)
    p = prettytable.PrettyTable(header)
    for opt in opts:
        p.add_row(opt)
    p.align = 'l'
    if args.csv:
        with open(args.csv, 'wb') as fout:  # 'wb' is the Python 2 idiom; use open(args.csv, 'w', newline='') on Python 3
            writer = csv.writer(fout)
            writer.writerow(header)
            writer.writerows(opts)
    else:
        # Not shown in the fragment: presumably the table is printed when no CSV path is given.
        print(p)
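
The example relies on an import_all() helper that the fragment does not show. A minimal sketch of what such a helper plausibly does, assuming its job is to import every module in the application package so that module-level config.register(...) calls run (the name is taken from the call above; the behavior is inferred, not confirmed by the source):

import importlib
import os
import pkgutil


def import_all(file_path):
    # Walk the package directory containing file_path and import every module,
    # triggering their module-level option registration as a side effect.
    pkg_dir = os.path.dirname(file_path)
    pkg_name = os.path.basename(pkg_dir)
    for _, name, _ in pkgutil.walk_packages([pkg_dir], prefix=pkg_name + '.'):
        importlib.import_module(name)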
Example #2
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import abc
import eventlet
from dao.common import config
from dao.common import log
opts = [config.StrOpt('dhcp', 'driver',
                      default='dao.control.worker.dhcp.neutron.NeutronHelper',
                      help='Path to DHCP helper')
        ]

config.register(opts)
CONF = config.get_config()
logger = log.getLogger(__name__)


class DHCPBase(object):

    instance = None

    @classmethod
    def get_helper(cls, worker=None):
        """
        :rtype: DHCPBase
        """
        if cls.instance:
            return cls.instance
        module, cls_name = CONF.dhcp.driver.rsplit('.', 1)
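
The method is cut off just after splitting the driver path. A plausible completion of this dynamic-loading pattern, assuming (not stated in the fragment) that the class named by CONF.dhcp.driver is imported, instantiated with the worker, and cached on the class:

import importlib


def load_driver(driver_path, *args):
    # Split 'package.module.ClassName' into a module path and a class name,
    # import the module, and instantiate the named class.
    module_path, cls_name = driver_path.rsplit('.', 1)
    module = importlib.import_module(module_path)
    return getattr(module, cls_name)(*args)

Applied to get_helper, setting cls.instance = load_driver(CONF.dhcp.driver, worker) and returning it would complete the singleton; this is a sketch of the common pattern, not the project's confirmed code.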
Example #3
import time

from dao.common import config
from dao.common import log
from dao.common import utils
from dao.dhcp.db import api
from dao.dhcp import base

opts = [
    config.StrOpt('dhcp',
                  'leases_dir',
                  '/etc/dhcp/conf.d',
                  help='Path to the directory with static leases'),
    config.IntOpt('dhcp',
                  'restart_delay',
                  default=4,
                  help='Delay before restarting dhcp')
]
config.register(opts)
CONF = config.get_config()

LOG = log.getLogger(__name__)

Subnet = base.Subnet


class DHCPController(object):
    pending_restarts = False

    @classmethod
    def enable(cls):
        if cls.pending_restarts:
            return
        cls.pending_restarts = True
        time.sleep(CONF.dhcp.restart_delay)  # fragment ends here; the delay lets a burst of enable() calls coalesce into one restart
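
enable() is truncated after the sleep, but the flag-plus-delay shape is a debounce: the first caller arms pending_restarts and waits out the delay so that a burst of requests produces a single restart. A self-contained sketch of that pattern (RestartCoalescer and its action callback are hypothetical names introduced for illustration):

import time


class RestartCoalescer(object):
    pending = False

    @classmethod
    def trigger(cls, delay, action):
        if cls.pending:
            return  # a restart is already scheduled; let it absorb this request
        cls.pending = True
        time.sleep(delay)  # window during which further requests are absorbed
        try:
            action()  # e.g. actually restart the DHCP service
        finally:
            cls.pending = False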
Example #4
               default=10,
               help='interval between retries of opening a sql connection'),
    cfg.IntOpt('db', 'sql_max_overflow',
               default=None,
               help='If set, use this value for max_overflow with sqlalchemy'),
    cfg.IntOpt('db', 'sql_connection_debug',
               default=0,
               help='Verbosity of SQL debugging information. 0=None, '
                    '100=Everything'),
    cfg.BoolOpt('db', 'sql_connection_trace',
                default=False,
                help='Add python stack traces to SQL as comment strings'),
]

cfg.register(sql_opts)
CONF = cfg.get_config()
LOG = log.getLogger(__name__)
_ENGINE = None
_MAKER = None


def greenthread_yield(dbapi_con, con_record):
    """
    Ensure other greenthreads get a chance to execute by forcing a context
    switch. With common database backends (e.g. MySQLdb and sqlite), there is
    no implicit yield caused by network I/O since they are implemented by
    C libraries that eventlet cannot monkey patch.
    """
    eventlet.sleep(0)
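
greenthread_yield has the (dbapi_connection, connection_record) signature of a SQLAlchemy pool 'checkin' listener; in the oslo-style session code this fragment resembles, it is attached to the engine with sqlalchemy.event.listen. A minimal, self-contained sketch (the in-memory SQLite URL is illustrative only):

import eventlet
import sqlalchemy
from sqlalchemy import event


def greenthread_yield(dbapi_con, con_record):
    # Force a context switch so other greenthreads can run while the
    # blocking C database driver holds the connection.
    eventlet.sleep(0)


engine = sqlalchemy.create_engine('sqlite://')
event.listen(engine, 'checkin', greenthread_yield)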
