Example #1
 def update_queued_visits(self, queue):
     # TODO this should be made transactional
     table = class_mapper(visit_class).mapped_table
     # Now update each of the visits with the most recent expiry
     for visit_key, expiry in queue.items():
         log.info("updating visit (%s) to expire at %s", visit_key, expiry)
         get_engine().execute(table.update(table.c.visit_key == visit_key, values=dict(expiry=expiry)))
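The TODO above asks for a transactional version. A minimal sketch, assuming the same mapper setup as above and using an explicit connection-level transaction (Connection.begin() is part of the SQLAlchemy API this code already targets):

 def update_queued_visits_transactional(self, queue):
     # Hedged sketch: same updates as above, but they commit or roll
     # back together instead of autocommitting one at a time.
     table = class_mapper(visit_class).mapped_table
     conn = get_engine().connect()
     trans = conn.begin()
     try:
         for visit_key, expiry in queue.items():
             log.info("updating visit (%s) to expire at %s", visit_key, expiry)
             conn.execute(table.update(table.c.visit_key == visit_key,
                                       values=dict(expiry=expiry)))
         trans.commit()
     except Exception:
         trans.rollback()
         raise
     finally:
         conn.close()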
Example #2
def setup_package():
    assert os.path.exists(
        _config_file), 'Config file %s must exist' % _config_file
    load_config(configfile=_config_file)
    log_to_stream(sys.stdout, level=logging.DEBUG)
    get_engine()
    metadata.create_all()
Example #3
def status(command, args):
    get_engine()
    get_model()
    ret = compare_metadata(metadata, MetaData(metadata.bind))
    for l in ret:
        print l
    if not ret:
        print "Database matches model"
Example #4
def status(command, args):
    get_engine()
    get_model()
    ret = compare_metadata(metadata, MetaData(metadata.bind))
    for l in ret:
        print l
    if not ret:
        print "Database matches model"
Example #5
 def update_queued_visits(self, queue):
     # TODO this should be made transactional
     table = class_mapper(visit_class).mapped_table
     # Now update each of the visits with the most recent expiry
     for visit_key, expiry in queue.items():
         log.info("updating visit (%s) to expire at %s", visit_key,
                   expiry)
         get_engine().execute(table.update(table.c.visit_key == visit_key,
             values=dict(expiry=expiry)))
Example #6
def create_tables(drop_all=False):
    """Create all tables defined in the model in the database.

    Optionally drop existing tables before creating them.

    """
    get_engine()
    if drop_all:
        print "Dropping all database tables defined in model."
        metadata.drop_all()
    metadata.create_all()

    print "All database tables defined in model created."
Example #7
def setup_module():
    global _sa_dburi, _so_dburi
    _so_dburi = config.get("sqlobject.dburi", "sqlite:///:memory:")
    _sa_dburi = config.get("sqlalchemy.dburi", "sqlite:///:memory:")
    # sqlalchemy setup
    database.set_db_uri({"sqlalchemy.dburi": "sqlite:///:memory:"})
    sqlalchemy_cleanup()
    get_engine()
    # sqlobject setup
    database.set_db_uri("sqlite:///:memory:")
    # table setup
    create_tables()
    hub.commit()
Example #8
def create_tables(drop_all=False):
    """Create all tables defined in the model in the database.

    Optionally drop existing tables before creating them.

    """
    get_engine()
    if drop_all:
        print "Dropping all database tables defined in model."
        metadata.drop_all()
    metadata.create_all()

    print "All database tables defined in model created."
Example #9
def main():
    current_date = None
    parser = get_parser()
    opts, args = parser.parse_args()
    threshold = opts.threshold
    reservation_type = opts.reservation_type.decode(sys.stdin.encoding or "utf8")
    testing = opts.testing
    configfile = opts.configfile
    load_config(configfile)
    log_to_stream(sys.stderr)
    interface.start(config)
    get_engine()
    if testing:
        print "Dry run only, nothing will be sent\n"
    identify_nags(threshold, reservation_type, testing)
Example #10
    def __init__(self, timeout):
        global visit_class
        visit_class_path = config.get("visit.saprovider.model", "turbogears.visit.savisit.TG_Visit")
        visit_class = load_class(visit_class_path)
        if visit_class is None:
            msg = "No visit class found for %s" % visit_class_path
            msg += ", did you run setup.py develop?"
            log.error(msg)

        get_engine()
        if visit_class is TG_Visit:
            mapper(visit_class, visits_table)
        # base-class' __init__ triggers self.create_model, so mappers need to
        # be initialized before.
        super(SqlAlchemyVisitManager, self).__init__(timeout)
Example #11
    def __init__(self, timeout):
        global visit_class
        visit_class_path = config.get("visit.saprovider.model",
            "turbogears.visit.savisit.TG_Visit")
        visit_class = load_class(visit_class_path)
        if visit_class is None:
            msg = 'No visit class found for %s' % visit_class_path
            msg += ', did you run setup.py develop?'
            log.error(msg)

        get_engine()
        if visit_class is TG_Visit:
            mapper(visit_class, visits_table)
        # base-class' __init__ triggers self.create_model, so mappers need to
        # be initialized before.
        super(SqlAlchemyVisitManager, self).__init__(timeout)
Example #12
    def all(cls):
        """
        Returns a list of all defined migrations, whether or not they have been 
        applied to this database yet.
        """
        # Beaker 23.0 originally shipped with data migrations handled manually
        # by beaker-init, without this database table, so we need to gracefully
        # handle the case where this table doesn't exist. In that case we
        # assume there are *no* incomplete migrations because it means the
        # admin has successfully run them using beaker-init from 23.0.
        # This special handling can be removed in 24.0+ because it is assumed
        # that the admin will run all Alembic migrations like normal.
        if cls.__tablename__ not in inspect(get_engine()).get_table_names():
            logger.debug(
                'Data migration table does not exist, skipping all migrations')
            return []

        migrations = []
        for filename in pkg_resources.resource_listdir('bkr.server',
                                                       'data-migrations'):
            name, extension = os.path.splitext(filename)
            if extension != '.py':
                continue
            name = name.decode(sys.getfilesystemencoding())
            try:
                migration = cls.query.filter(cls.name == name).one()
            except NoResultFound:
                migration = cls(name=name)
            migrations.append(migration)
        return migrations
Example #13
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    load_config_or_exit()
    engine = get_engine()

    # If you are trying to detect a column change (a size change, for
    # example), Alembic will not handle it automatically; an additional
    # parameter has to be passed via context.configure(compare_type=True).
    # Be aware that its output can be error-prone, especially around
    # dialects. We want to keep the dialect tightly aligned with SQLAlchemy
    # at all times, so if a generated migration imports the MySQL dialect
    # (MySQL is the main backend), that migration has to be written manually
    # - though the generated content can still show how to perform it.
    connection = engine.connect()
    context.configure(connection=connection, target_metadata=target_metadata)

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
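As a companion to the comment above, a hedged sketch of what the compare_type=True variant would look like; this is hypothetical, since the project deliberately leaves type comparison disabled:

def run_migrations_online_with_compare_type():
    # Hypothetical variant, not the project's configuration: compare_type=True
    # lets Alembic autogenerate detect column type/size changes, at the cost
    # of the dialect-related false positives described above.
    load_config_or_exit()
    engine = get_engine()
    connection = engine.connect()
    context.configure(connection=connection,
                      target_metadata=target_metadata,
                      compare_type=True)
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()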
Example #14
 def test_does_not_run_createrepo_unnecessarily(self):
     if 'sqlite' in get_engine().name:
         raise unittest.SkipTest('SQL generated by lazy_create is not valid'
                                 ' in sqlite')
     osmajor = u'GreenBeretLinux99'
     with session.begin():
         lab_controller = data_setup.create_labcontroller(
             fqdn=u'dummylab.example.invalid')
         distro_tree = data_setup.create_distro_tree(
             osmajor=OSMajor.lazy_create(osmajor=osmajor),
             harness_dir=False,
             lab_controllers=[lab_controller])
     remote_harness_dir = mkdtemp(suffix='remote')
     self.addCleanup(rmtree, remote_harness_dir)
     local_harness_dir = mkdtemp(suffix='local')
     self.addCleanup(rmtree, local_harness_dir)
     self._create_remote_harness(remote_harness_dir, osmajor)
     # run it once, repo is built
     update_repos('file://%s/' % remote_harness_dir, local_harness_dir)
     repodata_dir = os.path.join(local_harness_dir, osmajor, 'repodata')
     mtime = os.path.getmtime(repodata_dir)
     # run it again, repo should not be rebuilt
     time.sleep(0.001)
     update_repos('file://%s/' % remote_harness_dir, local_harness_dir)
     self.assertEquals(os.path.getmtime(repodata_dir), mtime)
Example #15
    def test_update_harness_repos(self):
        """Test that the update_repo() call runs as expected.

        This checks that the harness repos that are supposed to be
        synced are actually synced.

        Does not check repo metadata.
        """
        if 'sqlite' in get_engine().name:
            raise unittest.SkipTest('SQL generated by lazy_create is not valid'
                ' in sqlite')
        base_path = mkdtemp()
        self.addCleanup(rmtree, base_path)
        faux_remote_harness1 = self._create_remote_harness(base_path, 'foobangmajor')
        faux_remote_harness2 = self._create_remote_harness(base_path, 'foobazmajor')
        faux_local_harness = mkdtemp('local_harness')
        self.addCleanup(rmtree, faux_local_harness)
        with session.begin():
            lab_controller = data_setup.create_labcontroller(fqdn=u'dummylab.example.invalid')
            distro_tree = data_setup.create_distro_tree(
                                osmajor=OSMajor.lazy_create(osmajor=u'foobangmajor'),
                                harness_dir=False,
                                lab_controllers=[lab_controller])
            distro_tree = data_setup.create_distro_tree(
                                osmajor=OSMajor.lazy_create(osmajor=u'foobazmajor'),
                                harness_dir=False,
                                lab_controllers=[lab_controller])
        # I'm not testing the config here, so just use createrepo
        update_repos('file://%s/' % base_path, faux_local_harness)
        self.assertTrue(os.path.exists(os.path.join(faux_local_harness, 'foobangmajor')))
        self.assertTrue(os.path.exists(os.path.join(faux_local_harness, 'foobazmajor')))
Example #16
    def all(cls):
        """
        Returns a list of all defined migrations, whether or not they have been 
        applied to this database yet.
        """
        # Beaker 23.0 originally shipped with data migrations handled manually 
        # by beaker-init, without this database table, so we need to gracefully 
        # handle the case where this table doesn't exist. In that case we 
        # assume there are *no* incomplete migrations because it means the 
        # admin has successfully run them using beaker-init from 23.0.
        # This special handling can be removed in 24.0+ because it is assumed 
        # that the admin will run all Alembic migrations like normal.
        if cls.__tablename__ not in inspect(get_engine()).get_table_names():
            logger.debug('Data migration table does not exist, skipping all migrations')
            return []

        migrations = []
        for filename in pkg_resources.resource_listdir('bkr.server', 'data-migrations'):
            name, extension = os.path.splitext(filename)
            if extension != '.py':
                continue
            name = name.decode(sys.getfilesystemencoding())
            try:
                migration = cls.query.filter(cls.name == name).one()
            except NoResultFound:
                migration = cls(name=name)
            migrations.append(migration)
        return migrations
Example #17
    def test_update_harness_repos(self):
        """Test that the update_repo() call runs as expected.

        This checks that the harness repos that are supposed to be
        synced are actually synced.

        Does not check repo metadata.
        """
        if 'sqlite' in get_engine().name:
            raise unittest.SkipTest('SQL generated by lazy_create is not valid'
                                    ' in sqlite')
        base_path = mkdtemp()
        self.addCleanup(rmtree, base_path)
        faux_remote_harness1 = self._create_remote_harness(
            base_path, 'foobangmajor')
        faux_remote_harness2 = self._create_remote_harness(
            base_path, 'foobazmajor')
        faux_local_harness = mkdtemp('local_harness')
        self.addCleanup(rmtree, faux_local_harness)
        with session.begin():
            OSMajor.lazy_create(osmajor=u'foobangmajor')
            OSMajor.lazy_create(osmajor=u'foobazmajor')
        # I'm not testing the config here, so just use createrepo
        update_repos('file://%s/' % base_path, faux_local_harness)
        self.assertTrue(
            os.path.exists(os.path.join(faux_local_harness, 'foobangmajor')))
        self.assertTrue(
            os.path.exists(os.path.join(faux_local_harness, 'foobazmajor')))
Example #18
def execute(command, args):
    eng = get_engine()
    for cmd in args[2:]:
        ret = eng.execute(cmd)
        try:
            print list(ret)
        except Exception:
            # Statements that return no rows (e.g. INSERT/UPDATE) make
            # iterating the result fail; proceed silently in that case.
            pass
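A hedged alternative that avoids catching the exception at all by asking the result object whether it carries rows; assumes a SQLAlchemy version providing ResultProxy.returns_rows (0.6 and later):

def execute_checked(command, args):
    eng = get_engine()
    for cmd in args[2:]:
        ret = eng.execute(cmd)
        if ret.returns_rows:
            # Only row-returning statements (e.g. SELECT) are printed;
            # INSERT/UPDATE results are skipped instead of swallowed.
            print list(ret)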
Example #19
 def check_db(self, fqdn):
     conn = get_engine().connect()
     result = conn.execute("SELECT status,l.fqdn, type \
                     FROM system \
                         INNER JOIN lab_controller AS l ON system.lab_controller_id = l.id\
                     WHERE system.fqdn = %s", fqdn).fetchone()
     if not result:
          raise AssertionError('Could not find status, type, lab_controller for system %s in db' % fqdn)
     return {'status' : result[0], 'lab_controller' : result[1], 'type' : result[2] }
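The %s placeholder above is the paramstyle of DBAPIs such as MySQLdb; a dialect-neutral sketch of the same query using a named bound parameter via sqlalchemy.text() (an illustration, not how the test suite is written):

 def check_db_bound(self, fqdn):
     from sqlalchemy import text
     conn = get_engine().connect()
     result = conn.execute(text(
         "SELECT status, l.fqdn, type "
         "FROM system "
         "INNER JOIN lab_controller AS l ON system.lab_controller_id = l.id "
         "WHERE system.fqdn = :fqdn"), fqdn=fqdn).fetchone()
     if not result:
         raise AssertionError('Could not find status, type, lab_controller '
                              'for system %s in db' % fqdn)
     return {'status': result[0], 'lab_controller': result[1], 'type': result[2]}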
Example #20
def execute(command, args):
    eng = get_engine()
    for cmd in args[2:]:
        ret = eng.execute(cmd)
        try:
            print list(ret)
        except Exception:
            # Statements that return no rows (e.g. INSERT/UPDATE) make
            # iterating the result fail; proceed silently in that case.
            pass
Example #21
def run_data_migrations():
    migration = _outstanding_data_migrations[0]
    log.debug('Performing online data migration %s (one batch)', migration.name)
    finished = migration.migrate_one_batch(get_engine())
    if finished:
        log.debug('Marking online data migration %s as finished', migration.name)
        with session.begin():
            migration.mark_as_finished()
        session.close()
        _outstanding_data_migrations.pop(0)
    return True
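A hedged driver sketch for the function above: each call migrates one batch and pops finished migrations, so a caller can simply loop until the module's list is drained (the loop itself is an assumption, not from the source):

def run_all_data_migrations():
    # Drain the queue one batch at a time; finished migrations are popped
    # inside run_data_migrations().
    while _outstanding_data_migrations:
        run_data_migrations()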
Example #22
def run_data_migrations():
    migration = _outstanding_data_migrations[0]
    log.debug('Performing online data migration %s (one batch)', migration.name)
    finished = migration.migrate_one_batch(get_engine())
    if finished:
        log.debug('Marking online data migration %s as finished', migration.name)
        with session.begin():
            migration.mark_as_finished()
        session.close()
        _outstanding_data_migrations.pop(0)
    return True
Example #23
def workaround_sqlite_begin():
    engine = get_engine()

    @event.listens_for(engine, "connect")
    def do_connect(dbapi_connection, connection_record):
        # disable pysqlite's emitting of the BEGIN statement entirely.
        # also stops it from emitting COMMIT before any DDL.
        dbapi_connection.isolation_level = None

    @event.listens_for(engine, "begin")
    def do_begin(conn):
        # emit our own BEGIN
        conn.execute("BEGIN")
Example #24
File: util.py Project: sibiaoluo/beaker
def get_reports_engine():
    global _reports_engine
    if config.get('reports_engine.dburi'):
        if not _reports_engine:
            # same logic as in turbogears.database.get_engine
            engine_args = dict()
            for k, v in config.config.configMap['global'].iteritems():
                if k.startswith('reports_engine.'):
                    engine_args[k[len('reports_engine.'):]] = v
            dburi = engine_args.pop('dburi')
            log.debug('Creating reports_engine: %r %r', dburi, engine_args)
            _reports_engine = create_engine(dburi, **engine_args)
        return _reports_engine
    else:
        log.debug('Using default engine for reports_engine')
        return get_engine()
Example #25
File: util.py Project: xhernandez/beaker
def get_reports_engine():
    global _reports_engine
    if app.config.get('reports_engine.dburi'):
        if not _reports_engine:
            # same logic as in turbogears.database.get_engine
            engine_args = dict()
            for k, v in app.config.iteritems():
                if k.startswith('reports_engine.'):
                    engine_args[k[len('reports_engine.'):]] = v
            dburi = engine_args.pop('dburi')
            _reports_engine = create_engine(dburi, **engine_args)
            log.debug('Created reports_engine %r', _reports_engine)
        return _reports_engine
    else:
        log.debug('Using default engine for reports_engine')
        return get_engine()
Example #26
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    load_config_or_exit()
    engine = get_engine()

    connection = engine.connect()
    context.configure(connection=connection, target_metadata=target_metadata)

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
Example #27
 def test_exclude_nonexistent_osmajor(self):
     if 'sqlite' in get_engine().name:
         raise unittest.SkipTest('SQL generated by lazy_create is not valid'
             ' in sqlite')
     with session.begin():
         osmajor = OSMajor.lazy_create(osmajor="exist")
         lab_controller = data_setup.create_labcontroller(fqdn=u'dummylab.example.invalid')
         distro_tree = data_setup.create_distro_tree(osmajor=osmajor.osmajor,
                                                     harness_dir=False,
                                                     lab_controllers=[lab_controller])
         nonexistent_osmajor = OSMajor.lazy_create(osmajor=u'notexist')
     remote_harness_dir = mkdtemp(suffix='remote')
     self.addCleanup(rmtree, remote_harness_dir)
     local_harness_dir = mkdtemp(suffix='local')
     self.addCleanup(rmtree, local_harness_dir)
     self._create_remote_harness(remote_harness_dir, osmajor.osmajor)
     update_repos('file://%s/' % remote_harness_dir, local_harness_dir)
     self.assertTrue(os.path.exists(os.path.join(local_harness_dir, osmajor.osmajor)))
     self.assertFalse(os.path.exists(os.path.join(local_harness_dir, nonexistent_osmajor.osmajor)))
Example #28
 def test_does_not_run_createrepo_unnecessarily(self):
     if 'sqlite' in get_engine().name:
         raise unittest.SkipTest('SQL generated by lazy_create is not valid'
             ' in sqlite')
     osmajor = u'GreenBeretLinux99'
     with session.begin():
         OSMajor.lazy_create(osmajor=osmajor)
     remote_harness_dir = mkdtemp(suffix='remote')
     self.addCleanup(rmtree, remote_harness_dir)
     local_harness_dir = mkdtemp(suffix='local')
     self.addCleanup(rmtree, local_harness_dir)
     self._create_remote_harness(remote_harness_dir, osmajor)
     # run it once, repo is built
     update_repos('file://%s/' % remote_harness_dir, local_harness_dir)
     repodata_dir = os.path.join(local_harness_dir, osmajor, 'repodata')
     mtime = os.path.getmtime(repodata_dir)
     # run it again, repo should not be rebuilt
     time.sleep(0.001)
     update_repos('file://%s/' % remote_harness_dir, local_harness_dir)
     self.assertEquals(os.path.getmtime(repodata_dir), mtime)
Example #29
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    load_config_or_exit()
    engine = get_engine()

    connection = engine.connect()
    context.configure(
                connection=connection,
                target_metadata=target_metadata
                )

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
Example #30
 def test_exclude_nonexistent_osmajor(self):
     if 'sqlite' in get_engine().name:
         raise unittest.SkipTest('SQL generated by lazy_create is not valid'
                                 ' in sqlite')
     with session.begin():
         osmajor = OSMajor.lazy_create(osmajor="exist")
         lab_controller = data_setup.create_labcontroller(
             fqdn=u'dummylab.example.invalid')
         distro_tree = data_setup.create_distro_tree(
             osmajor=osmajor.osmajor,
             harness_dir=False,
             lab_controllers=[lab_controller])
         nonexistent_osmajor = OSMajor.lazy_create(osmajor=u'notexist')
     remote_harness_dir = mkdtemp(suffix='remote')
     self.addCleanup(rmtree, remote_harness_dir)
     local_harness_dir = mkdtemp(suffix='local')
     self.addCleanup(rmtree, local_harness_dir)
     self._create_remote_harness(remote_harness_dir, osmajor.osmajor)
     update_repos('file://%s/' % remote_harness_dir, local_harness_dir)
     self.assertTrue(
         os.path.exists(os.path.join(local_harness_dir, osmajor.osmajor)))
     self.assertFalse(
         os.path.exists(
             os.path.join(local_harness_dir, nonexistent_osmajor.osmajor)))
Example #31
File: fasmodel.py Project: 0-T-0/fas
from sqlalchemy.orm import relation
from sqlalchemy.exc import InvalidRequestError

# A few sqlalchemy tricks:
# Allow viewing foreign key relations as a dictionary
from sqlalchemy.orm.collections import attribute_mapped_collection
# Allow us to reference the remote table of a many:many as a simple list
from sqlalchemy.ext.associationproxy import association_proxy

from fedora.client import DictContainer
from fedora.tg.json import SABase
import fas
from fas import SHARE_CC_GROUP, SHARE_LOC_GROUP

# Bind us to the database defined in the config file.
get_engine()

#
# Tables Mapped from the DB
#

PeopleTable = Table('people', metadata, autoload=True)
PersonRolesTable = Table('person_roles', metadata, autoload=True)

ConfigsTable = Table('configs', metadata, autoload=True)
GroupsTable = Table('groups', metadata, autoload=True)
BugzillaQueueTable = Table('bugzilla_queue', metadata, autoload=True)
LogTable = Table('log', metadata, autoload=True)
RequestsTable = Table('requests', metadata, autoload=True)

SessionTable = Table('session', metadata, autoload=True)
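With the tables reflected via autoload=True, classes are then mapped onto them. A hedged continuation sketch (the class is illustrative; the real fasmodel defines much richer mapped classes):

class People(SABase):
    """Hypothetical mapped class for the reflected people table."""
    pass

mapper(People, PeopleTable)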
Example #32
"""
Mapping of database tables for logs to python classes.
"""

from sqlalchemy import Table
from sqlalchemy import select, literal_column, not_
from sqlalchemy.orm import polymorphic_union, relation
from turbogears.database import metadata, mapper, get_engine

from fedora.tg.json import SABase

from pkgdb.model.packages import Package, PackageListing
from pkgdb.model.acls import PersonPackageListingAcl, GroupPackageListingAcl

get_engine()

#
# Mapped Classes
#


class Log(SABase):
    """Base Log record.

    This is a Log record.  All logs will be entered via a subclass of this.

    Table -- Log
    """

    # pylint: disable-msg=R0902, R0903
Example #33
def setup_module():
    global fresh_metadata, users_table, test_table, Person, Address, Test

    config.update({"sqlalchemy.dburi": "sqlite:///:memory:"})

    if os.path.exists('freshtest.db'):
        os.unlink('freshtest.db')

    get_engine()
    fresh_metadata = MetaData()
    # :memory: can't be used in multiple threads
    fresh_metadata.bind = 'sqlite:///freshtest.db'
    metadata.bind.echo = True
    fresh_metadata.bind.echo = True

    users_table = Table("users", metadata,
                        Column("user_id", Integer, primary_key=True),
                        Column("user_name", String(40)),
                        Column("password", String(10)))

    mapper(User, users_table)

    if ActiveMapper:

        class Person(ActiveMapper):
            class mapping:
                id = column(Integer, primary_key=True)
                name = column(String(40))
                addresses = one_to_many("Address")

        class Address(ActiveMapper):
            class mapping:
                id = column(Integer, primary_key=True)
                address = column(String(40))
                city = column(String(40))
                person_id = column(Integer,
                                   foreign_key=ForeignKey("person.id"))

    else:

        persons_table = Table("persons", metadata,
                              Column("id", Integer, primary_key=True),
                              Column("name", String(40)))
        addresses_table = Table(
            "addresses", metadata, Column("id", Integer, primary_key=True),
            Column("address", String(40)), Column("city", String(40)),
            Column("person_id", Integer, ForeignKey(persons_table.c.id)))

        class Person(object):
            pass

        class Address(object):
            pass

        mapper(Person, persons_table)
        mapper(Address,
               addresses_table,
               properties=dict(person=relation(Person, backref='addresses')))

    test_table = Table("test", fresh_metadata,
                       Column("id", Integer, primary_key=True),
                       Column("val", String(40)))

    class Test(object):
        pass

    mapper(Test, test_table)

    try:
        metadata.create_all()
        fresh_metadata.create_all()
    except Exception, error:
        # workaround for a problem with PySqlite < 2.6 and SQLAlchemy < 0.5
        if 'Cannot operate on a closed cursor' in str(error):
            metadata.create_all(checkfirst=False)
            fresh_metadata.create_all(checkfirst=False)
        else:
            raise
Example #34
File: init.py Project: sibiaoluo/beaker
def init_db(user_name=None, password=None, user_display_name=None, user_email_address=None):
    get_engine()
    metadata.create_all()
    session.begin()

    try:
        admin = Group.by_name(u'admin')
    except InvalidRequestError:
        admin     = Group(group_name=u'admin',display_name=u'Admin')

    try:
        lab_controller = Group.by_name(u'lab_controller')
    except InvalidRequestError:
        lab_controller = Group(group_name=u'lab_controller',
                               display_name=u'Lab Controller')
    
    #Setup User account
    if user_name:
        if password:
            user = User(user_name=user_name.decode('utf8'), password=password.decode('utf8'))
            if user_display_name:
                user.display_name = user_display_name.decode('utf8')
            if user_email_address:
                user.email_address = user_email_address.decode('utf8')
            admin.users.append(user)
        else:
            print "Password must be provided with username"
    elif len(admin.users) == 0:
        print "No admin account exists, please create one with --user"
        sys.exit(1)

    # Create distro_expire perm if not present
    try:
        distro_expire_perm = Permission.by_name(u'distro_expire')
    except NoResultFound:
        distro_expire_perm = Permission(u'distro_expire')

    # Create proxy_auth perm if not present
    try:
        proxy_auth_perm = Permission.by_name(u'proxy_auth')
    except NoResultFound:
        proxy_auth_perm = Permission(u'proxy_auth')

    # Create tag_distro perm if not present
    try:
        tag_distro_perm = Permission.by_name(u'tag_distro')
    except NoResultFound:
        tag_distro_perm = Permission(u'tag_distro')
        admin.permissions.append(tag_distro_perm)

    # Create stop_task perm if not present
    try:
        stop_task_perm = Permission.by_name(u'stop_task')
    except NoResultFound:
        stop_task_perm = Permission(u'stop_task')
        lab_controller.permissions.append(stop_task_perm)
        admin.permissions.append(stop_task_perm)

    # Create secret_visible perm if not present
    try:
        secret_visible_perm = Permission.by_name(u'secret_visible')
    except NoResultFound:
        secret_visible_perm = Permission(u'secret_visible')
        lab_controller.permissions.append(secret_visible_perm)
        admin.permissions.append(secret_visible_perm)

    #Setup Hypervisors Table
    if Hypervisor.query.count() == 0:
        kvm       = Hypervisor(hypervisor=u'KVM')
        xen       = Hypervisor(hypervisor=u'Xen')
        hyperv    = Hypervisor(hypervisor=u'HyperV')
        vmware    = Hypervisor(hypervisor=u'VMWare')

    #Setup kernel_type Table
    if KernelType.query.count() == 0:
        default  = KernelType(kernel_type=u'default', uboot=False)
        highbank = KernelType(kernel_type=u'highbank', uboot=False)
        imx      = KernelType(kernel_type=u'imx', uboot=False)
        mvebu    = KernelType(kernel_type=u'mvebu', uboot=True)
        omap     = KernelType(kernel_type=u'omap', uboot=False)
        tegra    = KernelType(kernel_type=u'tegra', uboot=False)

    #Setup base Architectures
    if Arch.query.count() == 0:
        i386   = Arch(u'i386')
        x86_64 = Arch(u'x86_64')
        ia64   = Arch(u'ia64')
        ppc    = Arch(u'ppc')
        ppc64  = Arch(u'ppc64')
        s390   = Arch(u's390')
        s390x  = Arch(u's390x')
        armhfp = Arch(u'armhfp')

    #Setup base power types
    if PowerType.query.count() == 0:
        apc_snmp    = PowerType(u'apc_snmp')
        PowerType(u'apc_snmp_then_etherwake')
        bladecenter = PowerType(u'bladecenter')
        bullpap     = PowerType(u'bladepap')
        drac        = PowerType(u'drac')
        ether_wake  = PowerType(u'ether_wake')
        PowerType(u'hyper-v')
        ilo         = PowerType(u'ilo')
        integrity   = PowerType(u'integrity')
        ipmilan     = PowerType(u'ipmilan')
        ipmitool    = PowerType(u'ipmitool')
        lpar        = PowerType(u'lpar')
        rsa         = PowerType(u'rsa')
        virsh       = PowerType(u'virsh')
        wti         = PowerType(u'wti')

    #Setup key types
    if Key.query.count() == 0:
        DISKSPACE       = Key(u'DISKSPACE',True)
        COMMENT         = Key(u'COMMENT')
        CPUFAMILY       = Key(u'CPUFAMILY',True)
        CPUFLAGS        = Key(u'CPUFLAGS')
        CPUMODEL        = Key(u'CPUMODEL')
        CPUMODELNUMBER  = Key(u'CPUMODELNUMBER', True)
        CPUSPEED        = Key(u'CPUSPEED',True)
        CPUVENDOR       = Key(u'CPUVENDOR')
        DISK            = Key(u'DISK',True)
        FORMFACTOR      = Key(u'FORMFACTOR')
        HVM             = Key(u'HVM')
        MEMORY          = Key(u'MEMORY',True)
        MODEL           = Key(u'MODEL')
        MODULE          = Key(u'MODULE')
        NETWORK         = Key(u'NETWORK')
        NR_DISKS        = Key(u'NR_DISKS',True)
        NR_ETH          = Key(u'NR_ETH',True)
        NR_IB           = Key(u'NR_IB',True)
        PCIID           = Key(u'PCIID')
        PROCESSORS      = Key(u'PROCESSORS',True)
        RTCERT          = Key(u'RTCERT')
        SCRATCH         = Key(u'SCRATCH')
        STORAGE         = Key(u'STORAGE')
        USBID           = Key(u'USBID')
        VENDOR          = Key(u'VENDOR')
        XENCERT         = Key(u'XENCERT')
        NETBOOT         = Key(u'NETBOOT_METHOD')

    #Setup ack/nak responses
    if Response.query.count() == 0:
        ACK      = Response(response=u'ack')
        NAK      = Response(response=u'nak')

    if RetentionTag.query.count() == 0:
        SCRATCH         = RetentionTag(tag=u'scratch', is_default=1, expire_in_days=30)
        SIXTYDAYS       = RetentionTag(tag=u'60days', needs_product=False, expire_in_days=60)
        ONETWENTYDAYS   = RetentionTag(tag=u'120days', needs_product=False, expire_in_days=120)
        ACTIVE          = RetentionTag(tag=u'active', needs_product=True)
        AUDIT           = RetentionTag(tag=u'audit', needs_product=True)

    config_items = [
        # name, description, numeric
        (u'root_password', u'Plaintext root password for provisioned systems', False),
        (u'root_password_validity', u"Maximum number of days a user's root password is valid for", True),
        (u'default_guest_memory', u"Default memory (MB) for dynamic guest provisioning", True),
        (u'default_guest_disk_size', u"Default disk size (GB) for dynamic guest provisioning", True),
        (u'guest_name_prefix', u'Prefix for names of dynamic guests in oVirt', False),
    ]
    for name, description, numeric in config_items:
        try:
            ConfigItem.by_name(name)
        except NoResultFound:
            ConfigItem(name=name, description=description, numeric=numeric)
    session.flush()
    if ConfigItem.by_name(u'root_password').current_value() is None:
        ConfigItem.by_name(u'root_password').set(u'beaker', user=admin.users[0])

    session.commit()
    session.close()
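A hedged invocation sketch for init_db(); the argument values are assumptions modeled on the function's signature, not from the source:

load_config_or_exit()  # assumed config bootstrap, as in the migration examples
init_db(user_name='admin', password='testing',
        user_email_address='admin@example.com')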
Example #35
def create(command, args):
    print "Creating tables at %s" % (config.get("sqlalchemy.dburi"))
    get_engine()
    get_model()
    metadata.create_all()
Example #36
def apply_change(file):
    text = open(file).read()
    print "applying ", file
    s = sql.text(text)
    get_engine().execute(s)
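A hedged usage sketch for apply_change(): apply a directory of .sql upgrade scripts in name order (the directory layout is an assumption):

import glob

for path in sorted(glob.glob('upgrades/*.sql')):
    apply_change(path)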
Example #37
from datetime import datetime
from sqlalchemy import *
from sqlalchemy.orm import relation
from turbogears import database

database.set_db_uri('sqlite:///:memory:', 'sqlalchemy')
database.get_engine()
metadata = database.metadata
metadata.bind.echo = True

groups_table = Table('tg_group', metadata,
    Column('group_id', Integer, primary_key=True),
    Column('group_name', Unicode(16), unique=True),
    Column('display_name', Unicode(255)),
    Column('created', DateTime, default=datetime.now)
)

users_table = Table('tg_user', metadata,
    Column('user_id', Integer, primary_key=True),
    Column('user_name', Unicode(16), unique=True),
    Column('email_address', Unicode(255), unique=True),
    Column('display_name', Unicode(255)),
Example #38
def apply_change(file):
    text = open(file).read()
    print "applying ", file
    s = sql.text(text)
    get_engine().execute(s)
Example #39
def startTurboGears():
    """Handles TurboGears tasks when the CherryPy server starts.

    This adds the "tg_js" configuration to make MochiKit accessible.
    It also turns on stdlib logging when in development mode.

    """
    # Set up old-style logging
    cherrypy.config.environments['development'][
        'log_debug_info_filter.on'] = False

    if not config.get('tg.new_style_logging'):
        if config.get('server.log_to_screen'):
            setuplog = logging.getLogger()
            setuplog.setLevel(logging.DEBUG)
            fmt = logging.Formatter(
                "%(asctime)s %(name)s %(levelname)s %(message)s")
            handler = logging.StreamHandler(sys.stdout)
            handler.setLevel(logging.DEBUG)
            handler.setFormatter(fmt)
            setuplog.addHandler(handler)

        logfile = config.get('server.log_file')
        if logfile:
            setuplog = logging.getLogger('turbogears.access')
            setuplog.propagate = 0
            fmt = logging.Formatter("%(message)s")
            handler = logging.FileHandler(logfile)
            handler.setLevel(logging.INFO)
            handler.setFormatter(fmt)
            setuplog.addHandler(handler)

    # Add static filters
    resource_filename = pkg_resources.resource_filename
    config.update({'/tg_static': {
            'static_filter.on': True,
            'static_filter.dir':
                os.path.abspath(resource_filename(__name__, 'static')),
            'log_debug_info_filter.on': False,
        }})
    config.update({'/tg_js': {
            'static_filter.on': True,
            'static_filter.dir':
                os.path.abspath(resource_filename(__name__, 'static/js')),
            'log_debug_info_filter.on': False,
        }})
    mochikit_version = config.get(
        'tg.mochikit_version', config.get('tg_mochikit.version', '1.3'))
    mochikit_suffix = '_'.join(mochikit_version.split('.', 2)[:2])
    mochikit_file = os.path.abspath(resource_filename(
        __name__, 'static/js/MochiKit_%s.js' % mochikit_suffix))
    if os.path.exists(mochikit_file):
        for path in ('/tg_static/js/MochiKit.js', '/tg_js/MochiKit.js',
                '/tg_widgets/turbogears/js/MochiKit.js'):
            config.update({path: {
                'static_filter.on': True,
                'static_filter.file': mochikit_file,
            }})
    else:
        log.warning("MochiKit version %s not available" % mochikit_version)
    # Add decoding filter
    if config.get('decoding_filter.on', path='/') is None:
        config.update({'/': {
            'decoding_filter.on': True,
            'decoding_filter.encoding': config.get('kid.encoding', 'utf8')
        }})

    # Initialize template engines and load base templates
    view.load_engines()
    view.loadBaseTemplates()

    # Add request filters
    global webpath
    webpath = config.get('server.webpath') or ''

    if getattr(cherrypy, 'root', None):
        if not hasattr(cherrypy.root, '_cp_filters'):
            cherrypy.root._cp_filters = []
        cherrypy.root._cp_filters.extend([VirtualPathFilter(webpath),
            EndTransactionsFilter(), NestedVariablesFilter()])

    webpath = webpath.lstrip('/')
    if webpath and not webpath.endswith('/'):
        webpath += '/'

    # Register server with Bonjour framework
    bonjoursetting = config.get('tg.bonjour', None)
    if bonjoursetting or config.get('server.environment') == 'development':
        start_bonjour(bonjoursetting)

    # Bind metadata for SQLAlchemy
    if config.get("sqlalchemy.dburi"):
        database.get_engine()

    # Start all TurboGears extensions
    extensions = pkg_resources.iter_entry_points('turbogears.extensions')
    for entrypoint in extensions:
        ext = entrypoint.load()
        if hasattr(ext, 'start_extension'):
            ext.start_extension()

    # Call registered startup functions
    for item in call_on_startup:
        item()

    # Start the scheduler
    if config.get('tg.scheduler', False):
        scheduler._start_scheduler()
        log.info('Scheduler started')
Example #40
 def create_model(self):
     """Create the Visit table if it doesn't already exist."""
     get_engine()
     class_mapper(visit_class).local_table.create(checkfirst=True)
Example #41
def setup_module():
    global fresh_metadata, users_table, test_table, Person, Address, Test

    config.update({
        "sqlalchemy.dburi" : "sqlite:///:memory:"})

    if os.path.exists('freshtest.db'):
        os.unlink('freshtest.db')

    get_engine()
    fresh_metadata = MetaData()
    # :memory: can't be used in multiple threads
    fresh_metadata.bind = 'sqlite:///freshtest.db'
    metadata.bind.echo = True
    fresh_metadata.bind.echo = True

    users_table = Table("users", metadata,
        Column("user_id", Integer, primary_key=True),
        Column("user_name", String(40)),
        Column("password", String(10)))

    mapper(User, users_table)

    if ActiveMapper:

        class Person(ActiveMapper):
            class mapping:
                id = column(Integer, primary_key=True)
                name = column(String(40))
                addresses = one_to_many("Address")

        class Address(ActiveMapper):
            class mapping:
                id = column(Integer, primary_key=True)
                address = column(String(40))
                city = column(String(40))
                person_id = column(Integer,
                    foreign_key=ForeignKey("person.id"))

    else:

        persons_table = Table("persons", metadata,
                Column("id", Integer, primary_key=True),
                Column("name", String(40)))
        addresses_table = Table("addresses", metadata,
                Column("id", Integer, primary_key=True),
                Column("address", String(40)),
                Column("city", String(40)),
                Column("person_id", Integer,
                    ForeignKey(persons_table.c.id)))

        class Person(object):
            pass

        class Address(object):
            pass

        mapper(Person, persons_table)
        mapper(Address, addresses_table, properties=dict(
            person=relation(Person, backref='addresses')))

    test_table = Table("test", fresh_metadata,
        Column("id", Integer, primary_key=True),
        Column("val", String(40)))

    class Test(object):
        pass

    mapper(Test, test_table)

    try:
        metadata.create_all()
        fresh_metadata.create_all()
    except Exception, error:
        # workaround for a problem with PySqlite < 2.6 and SQLAlchemy < 0.5
        if 'Cannot operate on a closed cursor' in str(error):
            metadata.create_all(checkfirst=False)
            fresh_metadata.create_all(checkfirst=False)
        else:
            raise
Example #42
    def run(self):
        """Run the shell"""
        self.find_config()

        mod = get_model()
        if mod:
            locals = mod.__dict__
        else:
            locals = dict(__name__="tg-admin")

        if config.get("sqlalchemy.dburi"):
            using_sqlalchemy = True
            database.get_engine()
            locals.update(dict(session=database.session,
                metadata=database.metadata))
        else:
            using_sqlalchemy = False

        try:
            # try to use IPython if possible
            from IPython.terminal.interactiveshell import TerminalInteractiveShell

            class CustomIPShell(TerminalInteractiveShell):
                def raw_input(self, *args, **kw):
                    try:
                        return TerminalInteractiveShell.raw_input(self,
                            *args, **kw) # needs decoding (see below)?
                    except EOFError:
                        r = raw_input("Do you wish to commit your "
                                    "database changes? [yes]")
                        if not r.lower().startswith("n"):
                            if using_sqlalchemy:
                                self.push("session.flush()")
                            else:
                                self.push("hub.commit()")
                        raise EOFError

            shell = CustomIPShell(user_ns=locals)
            shell.mainloop()
        except ImportError:
            import code

            class CustomShell(code.InteractiveConsole):
                def raw_input(self, *args, **kw):
                    try:
                        import readline
                    except ImportError:
                        pass
                    try:
                        r = code.InteractiveConsole.raw_input(self,
                            *args, **kw)
                        for encoding in (getattr(sys.stdin, 'encoding', None),
                                sys.getdefaultencoding(), 'utf-8', 'latin-1'):
                            if encoding:
                                try:
                                    return r.decode(encoding)
                                except UnicodeError:
                                    pass
                        return r
                    except EOFError:
                        r = raw_input("Do you wish to commit your "
                                      "database changes? [yes]")
                        if not r.lower().startswith("n"):
                            if using_sqlalchemy:
                                self.push("session.flush()")
                            else:
                                self.push("hub.commit()")
                        raise EOFError

            shell = CustomShell(locals=locals)
            shell.interact()
Example #43
 def create_model(self):
     """Create the Visit table if it doesn't already exist."""
     get_engine()
     class_mapper(visit_class).local_table.create(checkfirst=True)
Example #44
def create(command, args):
    print "Creating tables at %s" % (config.get("sqlalchemy.dburi"))
    get_engine()
    get_model()
    metadata.create_all()
Example #45
def setup_package():
    assert os.path.exists(_config_file), "Config file %s must exist" % _config_file
    load_config(configfile=_config_file)
    log_to_stream(sys.stdout, level=logging.DEBUG)
    get_engine()
    metadata.create_all()
Example #46
File: init.py Project: ustbgaofan/beaker
def init_db(user_name=None, password=None, user_display_name=None, user_email_address=None):
    get_engine()
    metadata.create_all()
    session.begin()

    try:
        admin = Group.by_name(u'admin')
    except InvalidRequestError:
        admin     = Group(group_name=u'admin',display_name=u'Admin')
        session.add(admin)

    try:
        lab_controller = Group.by_name(u'lab_controller')
    except InvalidRequestError:
        lab_controller = Group(group_name=u'lab_controller',
                               display_name=u'Lab Controller')
        session.add(lab_controller)

    #Setup User account
    if user_name:
        if password:
            user = User(user_name=user_name.decode('utf8'), password=password.decode('utf8'))
            if user_display_name:
                user.display_name = user_display_name.decode('utf8')
            if user_email_address:
                user.email_address = user_email_address.decode('utf8')
            admin.users.append(user)
        else:
            print "Password must be provided with username"
    elif len(admin.users) == 0:
        print "No admin account exists, please create one with --user"
        sys.exit(1)

    # Create distro_expire perm if not present
    try:
        distro_expire_perm = Permission.by_name(u'distro_expire')
    except NoResultFound:
        distro_expire_perm = Permission(u'distro_expire')
        session.add(distro_expire_perm)

    # Create proxy_auth perm if not present
    try:
        proxy_auth_perm = Permission.by_name(u'proxy_auth')
    except NoResultFound:
        proxy_auth_perm = Permission(u'proxy_auth')
        session.add(proxy_auth_perm)

    # Create tag_distro perm if not present
    try:
        tag_distro_perm = Permission.by_name(u'tag_distro')
    except NoResultFound:
        tag_distro_perm = Permission(u'tag_distro')
        admin.permissions.append(tag_distro_perm)

    # Create stop_task perm if not present
    try:
        stop_task_perm = Permission.by_name(u'stop_task')
    except NoResultFound:
        stop_task_perm = Permission(u'stop_task')
        lab_controller.permissions.append(stop_task_perm)
        admin.permissions.append(stop_task_perm)

    # Create secret_visible perm if not present
    try:
        secret_visible_perm = Permission.by_name(u'secret_visible')
    except NoResultFound:
        secret_visible_perm = Permission(u'secret_visible')
        lab_controller.permissions.append(secret_visible_perm)
        admin.permissions.append(secret_visible_perm)

    #Setup Hypervisors Table
    if Hypervisor.query.count() == 0:
        for h in [u'KVM', u'Xen', u'HyperV', u'VMWare']:
            session.add(Hypervisor(hypervisor=h))

    #Setup kernel_type Table
    if KernelType.query.count() == 0:
        for type in [u'default', u'highbank', u'imx', u'omap', u'tegra']:
            session.add(KernelType(kernel_type=type, uboot=False))
        for type in [u'mvebu']:
            session.add(KernelType(kernel_type=type, uboot=True))

    #Setup base Architectures
    if Arch.query.count() == 0:
        for arch in [u'i386', u'x86_64', u'ia64', u'ppc', u'ppc64',
                u's390', u's390x', u'armhfp']:
            session.add(Arch(arch))

    #Setup base power types
    if PowerType.query.count() == 0:
        for power_type in [u'apc_snmp', u'apc_snmp_then_etherwake',
                u'bladecenter', u'bladepap', u'drac', u'ether_wake', u'hyper-v',
                u'ilo', u'integrity', u'ipmilan', u'ipmitool', u'lpar', u'rsa',
                u'virsh', u'wti']:
            session.add(PowerType(power_type))

    #Setup key types
    if Key.query.count() == 0:
        session.add(Key(u'DISKSPACE',True))
        session.add(Key(u'COMMENT'))
        session.add(Key(u'CPUFAMILY',True))
        session.add(Key(u'CPUFLAGS'))
        session.add(Key(u'CPUMODEL'))
        session.add(Key(u'CPUMODELNUMBER', True))
        session.add(Key(u'CPUSPEED',True))
        session.add(Key(u'CPUVENDOR'))
        session.add(Key(u'DISK',True))
        session.add(Key(u'FORMFACTOR'))
        session.add(Key(u'HVM'))
        session.add(Key(u'MEMORY',True))
        session.add(Key(u'MODEL'))
        session.add(Key(u'MODULE'))
        session.add(Key(u'NETWORK'))
        session.add(Key(u'NR_DISKS',True))
        session.add(Key(u'NR_ETH',True))
        session.add(Key(u'NR_IB',True))
        session.add(Key(u'PCIID'))
        session.add(Key(u'PROCESSORS',True))
        session.add(Key(u'RTCERT'))
        session.add(Key(u'SCRATCH'))
        session.add(Key(u'STORAGE'))
        session.add(Key(u'USBID'))
        session.add(Key(u'VENDOR'))
        session.add(Key(u'XENCERT'))
        session.add(Key(u'NETBOOT_METHOD'))

    #Setup ack/nak responses
    if Response.query.count() == 0:
        session.add(Response(response=u'ack'))
        session.add(Response(response=u'nak'))

    if RetentionTag.query.count() == 0:
        session.add(RetentionTag(tag=u'scratch', is_default=1, expire_in_days=30))
        session.add(RetentionTag(tag=u'60days', needs_product=False, expire_in_days=60))
        session.add(RetentionTag(tag=u'120days', needs_product=False, expire_in_days=120))
        session.add(RetentionTag(tag=u'active', needs_product=True))
        session.add(RetentionTag(tag=u'audit', needs_product=True))

    config_items = [
        # name, description, numeric
        (u'root_password', u'Plaintext root password for provisioned systems', False),
        (u'root_password_validity', u"Maximum number of days a user's root password is valid for", True),
        (u'default_guest_memory', u"Default memory (MB) for dynamic guest provisioning", True),
        (u'default_guest_disk_size', u"Default disk size (GB) for dynamic guest provisioning", True),
        (u'guest_name_prefix', u'Prefix for names of dynamic guests in oVirt', False),
    ]
    for name, description, numeric in config_items:
        ConfigItem.lazy_create(name=name, description=description, numeric=numeric)
    if ConfigItem.by_name(u'root_password').current_value() is None:
        ConfigItem.by_name(u'root_password').set(u'beaker', user=admin.users[0])

    session.commit()
    session.close()
Example #47
 def setUp(self):
     database.get_engine()
     database.metadata.create_all()
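A hedged companion sketch: a matching tearDown for this fixture (dropping the tables it created is an assumption about the test's cleanup needs):

 def tearDown(self):
     # Undo create_all() so each test starts from an empty schema.
     database.metadata.drop_all()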