def openDB(self):
    # Open (and, for Sqlite, possibly create) the application database.
    # Sets self.dbOK to indicate success; errors are logged, never raised.
    try:
        self._config = ConfigManager()
        self.db = self._config.config[self._config.database]["database"]
        create_db = False
        if self.db == self._config.Sqlite:
            folder = self._config.config[self._config.database]["folder"]
            loc = folder + '/icepapcms.db'
            print("Using Sqlite database at %s" % loc)
            # Only build the schema when the file does not exist yet.
            create_db = not os.path.exists(loc)
            if create_db:
                print("No database file found, creating it")
                if not os.path.exists(folder):
                    os.mkdir(folder)
            self._database = create_database("%s:%s" % (self.db, loc))
        else:
            # Networked backend: credentials come from the config file.
            server = self._config.config[self._config.database]["server"]
            user = self._config.config[self._config.database]["user"]
            pwd = self._config.config[self._config.database]["password"]
            scheme = "{}://{}:{}@{}/icepapcms".format(
                self.db, user, pwd, server)
            if self.db == 'mysql':
                self._database = MySQL(scheme)
            else:
                self._database = create_database(scheme)
        self._store = Store(self._database)
        if create_db:
            # createSqliteDB reports whether schema creation worked.
            self.dbOK = self.createSqliteDB()
        else:
            self.dbOK = True
    except Exception as e:
        self.log.error("Unexpected error on openDB: %s", e)
        self.dbOK = False
def test_version_change_success(self):
    """After a data update the stored version matches the package version."""
    migration.perform_data_update(self.db_file)
    check_store = Store(create_database(GLSettings.db_uri))
    factory = config.PrivateFactory(check_store)
    self.assertEqual(factory.get_val('version'), __version__)
    check_store.close()
def bequeathe_flags(source_message, target_message, incumbents=None):
    """Destroy `source_message`, leaving flags to `target_message`.

    If `source_message` holds the is_current_ubuntu flag, and there are no
    `incumbents` that hold the same flag, then `target_message` inherits
    it.  Similar for the is_current_upstream flag.
    """
    sacrifice_flags(source_message, incumbents)

    # Hand each "current" flag over to the surviving message, in the same
    # order the original transfers were performed.
    for flag in ('is_current_ubuntu', 'is_current_upstream'):
        if getattr(source_message, flag) and not getattr(target_message, flag):
            setattr(source_message, flag, False)
            setattr(target_message, flag, True)
            Store.of(source_message).add_flush_order(
                source_message, target_message)

    source_message.destroySelf()
def insert_data(self):
    """
    Return the SQL syntax needed to insert the data already present
    in the table.
    """
    store = Store(create_database(config.Database().uri))

    registers = []
    rows = store.find(self.model.__class__)
    # Resolve Storm column descriptors to their Python attribute names.
    fields = [
        r._detect_attr_name(self.model.__class__) for r in
        self.model._storm_columns.keys()
    ]
    for r in rows:
        tmp_row = {}
        for field in fields:
            tmp_row[field] = getattr(r, field)
        registers.append(tmp_row)

    # MySQL quotes identifiers with backticks, everything else with '.
    if self.__class__.__name__ == 'MySQL':
        commas = '`'
    else:
        commas = "'"

    query = ''
    for register in registers:
        # NOTE(review): values are string-interpolated, not parameterized,
        # and only unicode values get quoted — assumes the table contents
        # are trusted; confirm this is only run on our own data.
        query += ('INSERT INTO {}{}{} ({}) VALUES ({});\n'.format(
            commas, self.model.__storm_table__, commas,
            ', '.join(register.keys()),
            ', '.join([(str(field) if type(field) is not unicode
                        else "'{}'".format(field))
                       for field in register.values()])))

    return query
def delete(self):
    """Deletes the exercise, providing it has no associated worksheets."""
    if self.worksheet_exercises.count() > 0:
        raise IntegrityError()
    # Cascade over the dependent suites first.
    for test_suite in self.test_suites:
        test_suite.delete()
    Store.of(self).remove(self)
def test_mig_37_valid_tor_hs_key(self):
    # Migration 37 must import an existing Tor hidden-service key pair
    # found in TOR_DIR into the config tables.
    self._initStartDB(36)
    from globaleaks.db.migrations import update_37
    t = update_37.TOR_DIR
    # Point the migration at a scratch dir holding fixture key material.
    update_37.TOR_DIR = GLSettings.db_path

    pk_path = os.path.join(update_37.TOR_DIR, 'private_key')
    hn_path = os.path.join(update_37.TOR_DIR, 'hostname')

    shutil.copy(os.path.join(helpers.DATA_DIR, 'tor/private_key'), pk_path)
    shutil.copy(os.path.join(helpers.DATA_DIR, 'tor/hostname'), hn_path)

    ret = update_db()
    self.assertEqual(ret, None)

    new_uri = GLSettings.make_db_uri(
        os.path.join(GLSettings.db_path, GLSettings.db_file_name))
    store = Store(create_database(new_uri))
    hs = config.NodeFactory(store).get_val('onionservice')
    pk = config.PrivateFactory(store).get_val('tor_onion_key')

    self.assertEqual('lftx7dbyvlc5txtl.onion', hs)
    # The imported key must match the fixture, converted form.
    with open(os.path.join(helpers.DATA_DIR,
                           'tor/ephemeral_service_key')) as f:
        saved_key = f.read().strip()
    self.assertEqual(saved_key, pk)
    store.close()

    shutil.rmtree(GLSettings.db_path)
    # Restore the module-level constant for other tests.
    update_37.TOR_DIR = t
def delete(self):
    """Delete this suite, without asking questions."""
    # Remove dependents first, then the suite itself.
    for var in self.variables:
        var.delete()
    for case in self.test_cases:
        case.delete()
    Store.of(self).remove(self)
def setUp(self):
    # Prepare a scratch copy of the populated DB fixture at the current
    # DATABASE_VERSION, stamp it with a dummy version string, and back up
    # config bits that the migration tests will mock.
    helpers.init_glsettings_for_unit_tests()
    GLSettings.db_path = os.path.join(GLSettings.ramdisk_path, 'db_test')
    os.mkdir(GLSettings.db_path)
    db_name = 'glbackend-%d.db' % DATABASE_VERSION
    db_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           'db', 'populated', db_name)
    shutil.copyfile(db_path, os.path.join(GLSettings.db_path, db_name))
    self.db_file = os.path.join(GLSettings.db_path, db_name)
    GLSettings.db_uri = GLSettings.make_db_uri(self.db_file)

    # place a dummy version in the current db
    store = Store(create_database(GLSettings.db_uri))
    prv = config.PrivateFactory(store)
    self.dummy_ver = '2.XX.XX'
    prv.set_val('version', self.dummy_ver)
    self.assertEqual(prv.get_val('version'), self.dummy_ver)
    store.commit()
    store.close()

    # backup various mocks that we will use
    self._bck_f = config.is_cfg_valid
    # Rename the key so migrations see a config layout to repair.
    GLConfig['private']['xx_smtp_password'] = \
        GLConfig['private'].pop('smtp_password')
    self.dp = u'yes_you_really_should_change_me'
def delete(self):
    """Delete the project. Fails if can_delete is False."""
    if not self.can_delete:
        raise IntegrityError()
    # Cascade the delete through every assessed before removing self.
    for assessed_item in self.assesseds:
        assessed_item.delete()
    Store.of(self).remove(self)
def initialize(self, debug=None): """See `IDatabase`.""" # Calculate the engine url. url = expand(config.database.url, config.paths) log.debug('Database url: %s', url) # XXX By design of SQLite, database file creation does not honor # umask. See their ticket #1193: # http://www.sqlite.org/cvstrac/tktview?tn=1193,31 # # This sucks for us because the mailman.db file /must/ be group # writable, however even though we guarantee our umask is 002 here, it # still gets created without the necessary g+w permission, due to # SQLite's policy. This should only affect SQLite engines because its # the only one that creates a little file on the local file system. # This kludges around their bug by "touch"ing the database file before # SQLite has any chance to create it, thus honoring the umask and # ensuring the right permissions. We only try to do this for SQLite # engines, and yes, we could have chmod'd the file after the fact, but # half dozen and all... self.url = url self._prepare(url) database = create_database(url) store = Store(database, GenerationalCache()) database.DEBUG = (as_boolean(config.database.debug) if debug is None else debug) self.store = store store.commit()
def test_getByBuildFarmJobs(self):
    """Recipe builds can be looked up by their build farm jobs."""
    builds = [self.makeSourcePackageRecipeBuild() for _ in range(10)]
    Store.of(builds[0]).flush()
    jobs = [build.build_farm_job for build in builds]
    self.assertContentEqual(
        builds, SourcePackageRecipeBuild.getByBuildFarmJobs(jobs))
def bequeathe_flags(source_message, target_message, incumbents=None):
    """Destroy `source_message`, leaving flags to `target_message`.

    If `source_message` holds the is_current_ubuntu flag, and there are no
    `incumbents` that hold the same flag, then `target_message` inherits
    it.  Similar for the is_current_upstream flag.
    """
    # Clear the flags on any incumbent messages first.
    sacrifice_flags(source_message, incumbents)

    if (source_message.is_current_ubuntu and
            not target_message.is_current_ubuntu):
        # Transfer is_current_ubuntu flag.
        source_message.is_current_ubuntu = False
        target_message.is_current_ubuntu = True
        # Flush source before target — presumably so both messages never
        # hold the flag at flush time; confirm against the Store docs.
        Store.of(source_message).add_flush_order(source_message,
                                                 target_message)

    if (source_message.is_current_upstream and
            not target_message.is_current_upstream):
        # Transfer is_current_upstream flag.
        source_message.is_current_upstream = False
        target_message.is_current_upstream = True
        Store.of(source_message).add_flush_order(source_message,
                                                 target_message)

    source_message.destroySelf()
class NCBITaxonomySelector(object): def __init__(self): self.__init_database() def __init_database(self): """ creates the sqlite database instance and checks if the database exists in biodb. """ database= create_database("sqlite:%s" % biodb_sql_db_path) print "Created storm database from %s." % biodb_sql_db_path self.store= Store(database) def getTaxaByDivisionID(self, div_id): return self.store.find(BioDB, \ (NCBITaxonomyDivision.taxonID == BioDB.id) & \ (NCBITaxonomyDivision.divisionID == div_id)) def getDivisionIDByTaxonID(self, tax_id): return self.store.find(NCBITaxonomyDivision, NCBITaxonomyDivision.taxonID == tax_id).one().divisionID def getDivisionNameByID(self, div_id): return self.store.find(NCBIDivision, NCBIDivision.id == div_id).one().name
def test_generateEmail_with_null_fields(self):
    """GenerateEmail works when many fields are NULL."""
    person = self.factory.makePerson(name='person')
    cake = self.factory.makeSourcePackageRecipe(
        name=u'recipe', owner=person)
    pantry_owner = self.factory.makePerson(name='archiveowner')
    pantry = self.factory.makeArchive(name='ppa', owner=pantry_owner)
    secret = self.factory.makeDistroSeries(name=u'distroseries')
    # A SUPERSEDED build is used here; per the test name, many of its
    # fields are expected to be NULL.
    build = self.factory.makeSourcePackageRecipeBuild(
        recipe=cake, distroseries=secret, archive=pantry,
        status=BuildStatus.SUPERSEDED)
    Store.of(build).flush()
    ctrl = self.makeStatusEmail(build)
    self.assertEqual(
        u'[recipe build #%d] of ~person recipe in distroseries: '
        'Build for superseded Source' % (build.id), ctrl.subject)
    body, footer = ctrl.body.split('\n-- \n')
    self.assertEqual(superseded_body, body)
    build_url = canonical_url(build)
    self.assertEqual(
        '%s\nYou are the requester of the build.\n' % build_url, footer)
    self.assertEqual(
        config.canonical.noreply_from_address, ctrl.from_addr)
    self.assertEqual(
        'Requester', ctrl.headers['X-Launchpad-Message-Rationale'])
    self.assertEqual(
        'recipe-build-status',
        ctrl.headers['X-Launchpad-Notification-Type'])
    self.assertEqual(
        'SUPERSEDED', ctrl.headers['X-Launchpad-Build-State'])
def take(self, count):
    """Take some amount of parts from this pile and return the object
    representing this amount. Everything gets copied over."""
    assert 0 < count <= self.count
    # Taking everything is a no-op: hand back this very pile.
    if count == self.count:
        return self
    split = Part()
    split.count = count
    self.count -= count
    # Duplicate every descriptive attribute onto the new pile.
    for attr in ('source', 'date', 'price', 'vat', 'part_type',
                 'assignment', 'history', 'soldered', 'usable'):
        setattr(split, attr, getattr(self, attr))
    Store.of(self).add(split)
    return split
def test_check_unmodifiable_strings(self):
    # This test case asserts that data migration updates unmodifiable
    # l10n strings
    self._initStartDB(34)

    notification_l10n = NotificationL10NFactory(self.store)

    # Remember the canonical template, then blank it out.
    t0 = notification_l10n.get_val("export_template", "it")
    notification_l10n.set_val("export_template", "it", "")

    t1 = notification_l10n.get_val("export_template", "it")
    self.assertEqual(t1, "")

    self.store.commit()

    # place a dummy version in the current db
    store = Store(create_database(GLSettings.db_uri))
    prv = config.PrivateFactory(store)
    self.dummy_ver = "2.XX.XX"
    prv.set_val("version", self.dummy_ver)
    self.assertEqual(prv.get_val("version"), self.dummy_ver)
    store.commit()
    store.close()

    migration.perform_data_update(self.db_file)

    # The migration must have restored the canonical value.
    store = Store(create_database(GLSettings.db_uri))
    notification_l10n = NotificationL10NFactory(store)
    t2 = notification_l10n.get_val("export_template", "it")
    self.assertEqual(t2, t0)
    store.commit()
    store.close()

    shutil.rmtree(GLSettings.db_path)
def test_version_change_success(self):
    """The migrated DB must report the current software version."""
    migration.perform_data_update(self.db_file)
    s = Store(create_database(GLSettings.db_uri))
    self.assertEqual(
        config.PrivateFactory(s).get_val(u'version'), __version__)
    s.close()
def delete(self):
    """Delete the assessed. Fails if there are any submissions. Deletes
    extensions."""
    if self.submissions.count() > 0:
        raise IntegrityError()
    # Extensions go first, then the assessed itself.
    for ext in self.extensions:
        ext.delete()
    Store.of(self).remove(self)
def _checkValidDatabase(self, storage):
    '''Checks the Store to make sure it has a valid database'''
    store = Store(storage)
    # Every table declared in the schema must exist and be empty.
    for table in SCHEMA:
        rows = store.execute('SELECT * FROM `%s`' % table.lower())
        self.assertEqual(rows.get_all(), [])
    return True
def cancel(self, archive_subscriber_ids, cancelled_by):
    """See `IArchiveSubscriberSet`."""
    # Find the targeted subscriptions and cancel them in one UPDATE.
    subscriptions = Store.of(cancelled_by).find(
        ArchiveSubscriber,
        ArchiveSubscriber.id.is_in(archive_subscriber_ids))
    subscriptions.set(
        date_cancelled=UTC_NOW,
        cancelled_by_id=cancelled_by.id,
        status=ArchiveSubscriberStatus.CANCELLED)
def __init__(self, table_history, old_db_file, new_db_file, start_ver):
    """Prepare a one-step migration from start_ver to start_ver + 1.

    Opens a store on the old sqlite DB and creates the new DB's tables,
    either from the final schema file (when this is the last step) or
    from the per-version table history.
    """
    self.table_history = table_history
    self.old_db_file = old_db_file
    self.new_db_file = new_db_file
    self.start_ver = start_ver

    self.std_fancy = " ł "
    self.debug_info = " [%d => %d] " % (start_ver, start_ver + 1)

    # Sanity-check: every table's history must cover every supported
    # version.
    for k, v in table_history.iteritems():
        # +1 because count start from 0,
        # -8 because the relase befor the 8th are not supported anymore
        length = DATABASE_VERSION + 1 - 8
        if len(v) != length:
            msg = 'Expecting a table with {} statuses ({})'.format(length, k)
            raise TypeError(msg)

    log.msg('{} Opening old DB: {}'.format(self.debug_info, old_db_file))
    old_database = create_database('sqlite:' + self.old_db_file)
    self.store_old = Store(old_database)

    GLSetting.db_file = new_db_file

    new_database = create_database('sqlite:' + new_db_file)
    self.store_new = Store(new_database)

    if self.start_ver + 1 == DATABASE_VERSION:
        # Final step: build the new DB straight from the current schema
        # file instead of the table history.
        log.msg('{} Acquire SQL schema {}'.format(self.debug_info,
                                                  GLSetting.db_schema_file))

        if not os.access(GLSetting.db_schema_file, os.R_OK):
            log.msg('Unable to access', GLSetting.db_schema_file)
            raise IOError('Unable to access db schema file')

        with open(GLSetting.db_schema_file) as f:
            create_queries = ''.join(f).split(';')
            for create_query in create_queries:
                try:
                    self.store_new.execute(create_query + ';')
                except OperationalError:
                    # Log and continue: some statements may already apply.
                    log.msg('OperationalError in "{}"'.format(create_query))

        self.store_new.commit()
        return
        # return here and manage the migrant versions here:

    for k, v in self.table_history.iteritems():
        create_query = self.get_right_sql_version(k, self.start_ver + 1)
        if not create_query:
            # table not present in the version
            continue

        try:
            self.store_new.execute(create_query + ';')
        except OperationalError as excep:
            log.msg('{} OperationalError in [{}]'.format(self.debug_info,
                                                         create_query))
            raise excep

    self.store_new.commit()
def connect(self):
    """Open the outbound MySQL database and bind a store to it."""
    opts = Config()
    uri = ('mysql://' + opts.db_user_out + ':' + opts.db_password_out +
           '@' + opts.db_hostname_out + ':' + opts.db_port_out +
           '/' + opts.db_database_out)
    self.database = create_database(uri)
    self.store = Store(self.database)
def destroySelf(self, user):
    """See `IGitRule`."""
    # Log the removal first so the activity entry captures the rule as
    # it was before anything is deleted.
    getUtility(IGitActivitySet).logRuleRemoved(self, user)
    for grant in self.grants:
        grant.destroySelf()
    # Snapshot the ordered rule list before removing this rule, then
    # renumber the survivors.
    rules = list(self.repository.rules)
    Store.of(self).remove(self)
    rules.remove(self)
    removeSecurityProxy(self.repository)._syncRulePositions(rules)
def __init__(self, table_history, old_db_file, new_db_file, start_ver):
    """Prepare a one-step migration from start_ver to start_ver + 1.

    Opens a store on the old sqlite DB and creates the new DB's tables,
    either from the final schema file (when this is the last step) or
    from the per-version table history.
    """
    self.table_history = table_history
    self.old_db_file = old_db_file
    self.new_db_file = new_db_file
    self.start_ver = start_ver

    self.std_fancy = " ł "
    self.debug_info = " [%d => %d] " % (start_ver, start_ver + 1)

    # Sanity-check: every table's history must cover every supported
    # version.
    for k, v in table_history.iteritems():
        length = DATABASE_VERSION + 1 - FIRST_DATABASE_VERSION_SUPPORTED
        if len(v) != length:
            msg = 'Expecting a table with {} statuses ({})'.format(length, k)
            raise TypeError(msg)

    log.msg('{} Opening old DB: {}'.format(self.debug_info, old_db_file))
    old_database = create_database('sqlite:' + self.old_db_file)
    self.store_old = Store(old_database)

    GLSettings.db_file = new_db_file

    new_database = create_database('sqlite:' + new_db_file)
    self.store_new = Store(new_database)

    if self.start_ver + 1 == DATABASE_VERSION:
        # Final step: build the new DB straight from the current schema
        # file instead of the table history.
        log.msg('{} Acquire SQL schema {}'.format(self.debug_info,
                                                  GLSettings.db_schema_file))

        if not os.access(GLSettings.db_schema_file, os.R_OK):
            log.msg('Unable to access', GLSettings.db_schema_file)
            raise IOError('Unable to access db schema file')

        with open(GLSettings.db_schema_file) as f:
            create_queries = ''.join(f).split(';')
            for create_query in create_queries:
                try:
                    self.store_new.execute(create_query + ';')
                except OperationalError:
                    # Log and continue: some statements may already apply.
                    log.msg('OperationalError in "{}"'.format(create_query))

        self.store_new.commit()
        return
        # return here and manage the migrant versions here:

    for k, v in self.table_history.iteritems():
        create_query = self.get_right_sql_version(k, self.start_ver + 1)
        if not create_query:
            # table not present in the version
            continue

        try:
            self.store_new.execute(create_query + ';')
        except OperationalError as excep:
            log.msg('{} OperationalError in [{}]'.format(self.debug_info,
                                                         create_query))
            raise excep

    self.store_new.commit()
def test_builder_history(self):
    """A completed build shows up on its builder's +history page."""
    Store.of(self.build).flush()
    self.build.updateStatus(
        BuildStatus.FULLYBUILT, builder=self.factory.makeBuilder())
    title = self.build.title
    browser = self.getViewBrowser(self.build.builder, "+history")
    main_text = extract_text(find_main_content(browser.contents))
    self.assertTextMatchesExpressionIgnoreWhitespace(
        "Build history.*%s" % title, main_text)
    self.assertEqual(self.build_url, browser.getLink(title).url)
def test_ver_change_exception(self):
    # Explicity throw an exception in managed_ver_update via is_cfg_valid
    config.is_cfg_valid = apply_gen(throw_excep)
    self.assertRaises(IOError, migration.perform_data_update, self.db_file)
    # The failed update must not have touched the stored version.
    s = Store(create_database(GLSettings.db_uri))
    self.assertEqual(
        config.PrivateFactory(s).get_val('version'), self.dummy_ver)
    s.close()
def test_ver_change_exception(self):
    # Explicity throw an exception in managed_ver_update via is_cfg_valid
    config.is_cfg_valid = apply_gen(throw_excep)
    self.assertRaises(
        IOError, migration.perform_data_update, self.db_file)
    # The version stamp must be unchanged after the failure.
    store = Store(create_database(GLSettings.db_uri))
    factory = config.PrivateFactory(store)
    self.assertEqual(factory.get_val(u'version'), self.dummy_ver)
    store.close()
def test_create(self):
    """
    L{Schema.create} can be used to create the tables of a L{Store}.
    """
    # Before creation, selecting from the table must fail.
    self.assertRaises(StormError, self.store.execute,
                      "SELECT * FROM person")
    self.schema.create(self.store)
    self.assertEquals(
        list(self.store.execute("SELECT * FROM person")), [])
    # By default changes are committed
    other_store = Store(self.database)
    self.assertEquals(
        list(other_store.execute("SELECT * FROM person")), [])
def test_version_change_not_ok(self):
    # Set is_config_valid to false during managed ver update
    config.is_cfg_valid = apply_gen(mod_bool)
    self.assertRaises(
        Exception, migration.perform_data_update, self.db_file)
    # Ensure the rollback has succeeded
    rollback_store = Store(create_database(GLSettings.db_uri))
    factory = config.PrivateFactory(rollback_store)
    self.assertEqual(factory.get_val('version'), self.dummy_ver)
    rollback_store.close()
def test_binary_builds(self):
    """The binary_builds property should be populated automatically."""
    recipe_build = self.factory.makeSourcePackageRecipeBuild()
    multiverse = self.factory.makeComponent(name='multiverse')
    release = self.factory.makeSourcePackageRelease(
        source_package_recipe_build=recipe_build, component=multiverse)
    self.assertEqual([], list(recipe_build.binary_builds))
    binary = self.factory.makeBinaryPackageBuild(release)
    # An unrelated binary build must not show up.
    self.factory.makeBinaryPackageBuild()
    Store.of(binary).flush()
    self.assertEqual([binary], list(recipe_build.binary_builds))
def delete(self):
    """Delete the worksheet, provided it has no attempts on any exercises.

    Raises IntegrityError if any exercise in this worksheet has saves or
    attempts attached.  (The previous docstring claimed a True/False
    return value, but the method returns None and signals failure by
    raising — callers must catch IntegrityError.)
    """
    for ws_ex in self.all_worksheet_exercises:
        if ws_ex.saves.count() > 0 or ws_ex.attempts.count() > 0:
            raise IntegrityError()
    self.remove_all_exercises()
    Store.of(self).remove(self)
def test_update_existing_record(self):
    '''Existing records should be updated.'''
    handler = self.handler()
    # Index the same file twice; the row count must stay at one.
    for _ in range(2):
        video = handler(self.filename)
        matches = Store.of(video).find(
            models.VideoFile, models.VideoFile.filename == self.filename)
        self.assertEqual(matches.count(), 1)
def setUp(self):
    # Build a scratch package holding two patch modules, an sqlite store
    # with an empty `patch` table, and a PatchApplier wired to a
    # committer that also drives a second tracking connection.
    super(PatchApplierTest, self).setUp()

    self.patchdir = self.makeDir()
    self.pkgdir = os.path.join(self.patchdir, "mypackage")
    os.makedirs(self.pkgdir)

    f = open(os.path.join(self.pkgdir, "__init__.py"), "w")
    f.write("shared_data = []")
    f.close()

    # Order of creation here is important to try to screw up the
    # patch ordering, as os.listdir returns in order of mtime (or
    # something).
    for pname, data in [("patch_380.py", patch_test_1),
                        ("patch_42.py", patch_test_0)]:
        self.add_module(pname, data)

    sys.path.append(self.patchdir)

    self.filename = self.makeFile()
    self.uri = "sqlite:///%s" % self.filename
    self.store = Store(create_database(self.uri))

    self.store.execute("CREATE TABLE patch "
                       "(version INTEGER NOT NULL PRIMARY KEY)")

    # Neither patch has been applied yet.
    self.assertFalse(self.store.get(Patch, (42)))
    self.assertFalse(self.store.get(Patch, (380)))

    import mypackage
    self.mypackage = mypackage
    self.patch_set = PatchSet(mypackage)

    # Create another connection just to keep track of the state of the
    # whole transaction manager. See the assertion functions below.
    self.another_store = Store(create_database("sqlite:"))
    self.another_store.execute("CREATE TABLE test (id INT)")
    self.another_store.commit()
    self.prepare_for_transaction_check()

    class Committer(object):
        # Keeps both stores' transactions in step.

        def commit(committer):
            self.store.commit()
            self.another_store.commit()

        def rollback(committer):
            self.store.rollback()
            self.another_store.rollback()

    self.committer = Committer()
    self.patch_applier = PatchApplier(self.store, self.patch_set,
                                      self.committer)
def test_version_change_not_ok(self):
    # Set is_config_valid to false during managed ver update
    config.is_cfg_valid = apply_gen(mod_bool)
    self.assertRaises(
        Exception, migration.perform_data_update, self.db_file)
    # Ensure the rollback has succeeded
    store = Store(create_database(GLSettings.db_uri))
    self.assertEqual(
        config.PrivateFactory(store).get_val(u'version'), self.dummy_ver)
    store.close()
def test_builder_history(self):
    """Recipe builds are linked from their builder's history page."""
    build = self.makeRecipeBuild()
    Store.of(build).flush()
    build_url = canonical_url(build)
    build.updateStatus(
        BuildStatus.FULLYBUILT, builder=self.factory.makeBuilder())
    browser = self.getViewBrowser(build.builder, '+history')
    self.assertTextMatchesExpressionIgnoreWhitespace(
        'Build history.*~chef/chocolate/cake recipe build',
        extract_text(find_main_content(browser.contents)))
    link = browser.getLink('~chef/chocolate/cake recipe build')
    self.assertEqual(build_url, link.url)
def set_config(self, **kwargs):
    """Set the configuration of this back-end.

    Expected keys in kwargs:
      uri        -- database URI handed to storm's create_database
      log_format -- format string for the stream handler
      log_level  -- name of a logging level, e.g. 'INFO'
    """
    uri = kwargs['uri']
    database = create_database(uri)
    self.store = Store(database)
    self.logger = logging.getLogger('StormStorageBackend')
    handler = logging.StreamHandler()
    formatter = logging.Formatter(kwargs['log_format'])
    handler.setFormatter(formatter)
    self.logger.addHandler(handler)
    # getattr() is the idiomatic way to resolve a level name on the
    # logging module; logging.__getattribute__(...) only worked by
    # accident of the attribute protocol.
    self.logger.setLevel(getattr(logging, kwargs['log_level']))
def test_update_existing_record(self):
    '''Existing records should be updated.'''
    handler = self.handler()
    image = handler(self.filename)
    query = (models.PhotoImage.filename == self.filename)
    self.assertEqual(
        Store.of(image).find(models.PhotoImage, query).count(), 1)
    # Indexing the same file again must not create a second row.
    image = handler(self.filename)
    self.assertEqual(
        Store.of(image).find(models.PhotoImage, query).count(), 1)
def setUp(self):
    # Build a scratch package holding two patch modules, an sqlite store
    # with an empty `patch` table, and a PatchApplier wired to a
    # committer that also drives a second tracking connection.
    super(PatchTest, self).setUp()

    self.patchdir = self.makeDir()
    self.pkgdir = os.path.join(self.patchdir, "mypackage")
    os.makedirs(self.pkgdir)

    f = open(os.path.join(self.pkgdir, "__init__.py"), "w")
    f.write("shared_data = []")
    f.close()

    # Order of creation here is important to try to screw up the
    # patch ordering, as os.listdir returns in order of mtime (or
    # something).
    for pname, data in [("patch_380.py", patch_test_1),
                        ("patch_42.py", patch_test_0)]:
        self.add_module(pname, data)

    sys.path.append(self.patchdir)

    self.filename = self.makeFile()
    self.uri = "sqlite:///%s" % self.filename
    self.store = Store(create_database(self.uri))

    self.store.execute("CREATE TABLE patch "
                       "(version INTEGER NOT NULL PRIMARY KEY)")

    # Neither patch has been applied yet.
    self.assertFalse(self.store.get(Patch, (42)))
    self.assertFalse(self.store.get(Patch, (380)))

    import mypackage
    self.mypackage = mypackage

    # Create another connection just to keep track of the state of the
    # whole transaction manager. See the assertion functions below.
    self.another_store = Store(create_database("sqlite:"))
    self.another_store.execute("CREATE TABLE test (id INT)")
    self.another_store.commit()
    self.prepare_for_transaction_check()

    class Committer(object):
        # Keeps both stores' transactions in step.

        def commit(committer):
            self.store.commit()
            self.another_store.commit()

        def rollback(committer):
            self.store.rollback()
            self.another_store.rollback()

    self.committer = Committer()
    self.patch_applier = PatchApplier(self.store, self.mypackage,
                                      self.committer)
def postconditions_36(self):
    """Check the onion service data landed in the migrated config."""
    db_uri = GLSettings.make_db_uri(
        os.path.join(GLSettings.db_path, GLSettings.db_file_name))
    store = Store(create_database(db_uri))
    onion_addr = config.NodeFactory(store).get_val(u'onionservice')
    onion_key = config.PrivateFactory(store).get_val(u'tor_onion_key')
    self.assertEqual('lftx7dbyvlc5txtl.onion', onion_addr)
    key_file = os.path.join(helpers.DATA_DIR, 'tor/ephemeral_service_key')
    with open(key_file) as f:
        saved_key = f.read().strip()
    self.assertEqual(saved_key, onion_key)
    store.close()
def test_trim_value_to_range(self):
    """Out-of-range config values are clamped by trim_value_to_range."""
    store = Store(create_database(GLSettings.db_uri))
    node_factory = config.NodeFactory(store)
    cfg = node_factory.get_cfg(u'wbtip_timetolive')
    # The setter rejects values above the allowed range...
    self.assertRaises(errors.InvalidModelInput, cfg.set_v, 3650)
    # ...so sneak the out-of-range value in directly.
    cfg.value = {'v': 3650}
    store.commit()
    MigrationBase.trim_value_to_range(node_factory, u'wbtip_timetolive')
    self.assertEqual(node_factory.get_val(u'wbtip_timetolive'), 365 * 2)
def test_builder_history(self):
    """The builder history page mentions and links the recipe build."""
    build = self.makeRecipeBuild()
    Store.of(build).flush()
    build_url = canonical_url(build)
    build.updateStatus(BuildStatus.FULLYBUILT,
                       builder=self.factory.makeBuilder())
    browser = self.getViewBrowser(build.builder, '+history')
    page_text = extract_text(find_main_content(browser.contents))
    self.assertTextMatchesExpressionIgnoreWhitespace(
        'Build history.*~chef/chocolate/cake recipe build', page_text)
    self.assertEqual(
        build_url,
        browser.getLink('~chef/chocolate/cake recipe build').url)
def do_statspollute(dbfile): # source gl_database = create_database("sqlite:%s" % dbfile) source_store = Store(gl_database) stats = source_store.find(models.Stats) counter = 0 for s in stats: source_store.remove(s) counter += 1 print "removed %d entry in stats" % counter counter = 0 # 21 days in the past for past_hours in xrange(24 * 7 * 3): past_hours += 4 when = utc_past_date(hours=past_hours) newstat = models.Stats() newstat.freemb = randint(1000, 1050) newstat.year = when.isocalendar()[0] newstat.week = when.isocalendar()[1] level = round((randint(0, 1000) / 240.0), 1) - 2 def random_pollution(): return int(randint(0,11) + (5 * level)) activity_fake = { 'successfull_logins': random_pollution(), 'failed_logins': random_pollution(), 'started_submissions': random_pollution(), 'completed_submissions': random_pollution(), 'uploaded_files': int(randint(0,11) + (5 * level)), 'appended_files': random_pollution(), 'wb_comments': random_pollution(), 'wb_messages': random_pollution(), 'receiver_comments': random_pollution(), 'receiver_messages': random_pollution() } for k, v in activity_fake.iteritems(): if v < 0: activity_fake[k] = 0 newstat.start = when newstat.summary = activity_fake counter += 1 source_store.add(newstat) print "Committing %d stats" % counter source_store.commit()
def test_ver_change_exception(self):
    # Explicity throw an exception in managed_ver_update via is_cfg_valid
    config.is_cfg_valid = apply_gen(throw_excep)
    try:
        # Generator-style test body: the update is expected to fail.
        yield migration.perform_data_update(self.db_file)
        self.fail()
    except IOError as e:
        self.assertIsInstance(e, IOError)

    # The failed update must not have touched the stored version.
    store = Store(create_database(GLSettings.db_uri))
    prv = config.PrivateFactory(store)
    self.assertEqual(prv.get_val('version'), self.dummy_ver)
    store.close()
def test_update_existing_record(self):
    '''The same file should not be indexed twice, but updated.'''
    handler = self.handler()
    track = handler(self.filename)
    record = Store.of(track).find(
        models.MusicTrack,
        models.MusicTrack.filename == self.filename).one()
    # Dirty the record; re-indexing must overwrite it, not duplicate it.
    record.comment = u'Foo bar baz'
    Store.of(record).commit()
    track = handler(self.filename)
    matches = Store.of(track).find(
        models.MusicTrack, models.MusicTrack.filename == self.filename)
    self.assertEqual(matches.count(), 1)
    self.assertEqual(matches.one().comment, u'This is a comment')
def _initStartDB(self, target_ver):
    """Copy the populated DB fixture for `target_ver` into a scratch
    directory and open a store on it."""
    helpers.init_glsettings_for_unit_tests()
    GLSettings.db_path = os.path.join(GLSettings.ramdisk_path, 'db_test')
    os.mkdir(GLSettings.db_path)
    db_name = 'glbackend-%d.db' % target_ver
    fixture = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        'db', 'populated', db_name)
    self.db_file = os.path.join(GLSettings.db_path, db_name)
    shutil.copyfile(fixture, self.db_file)
    GLSettings.db_uri = GLSettings.make_db_uri(self.db_file)
    self.store = Store(create_database(GLSettings.db_uri))
def usable_distro_series(self, value):
    # Setter: reconcile the stored SnappyDistroSeries links with `value`.
    # Current links, keyed by their DistroSeries.
    enablements = dict(
        Store.of(self).find(
            (DistroSeries, SnappyDistroSeries),
            SnappyDistroSeries.snappy_series == self,
            SnappyDistroSeries.distro_series_id == DistroSeries.id))
    for distro_series in enablements:
        if distro_series not in value:
            # Dropping the preferred series invalidates the cached
            # preferred-series property.
            if enablements[distro_series].preferred:
                get_property_cache(self)._preferred_distro_series = None
            Store.of(self).remove(enablements[distro_series])
    for distro_series in value:
        if distro_series not in enablements:
            # New series: create the link row.
            link = SnappyDistroSeries(self, distro_series)
            Store.of(self).add(link)
def clone_worksheets(self, source):
    """Clone all worksheets from the specified source to this offering."""
    import ivle.worksheet.utils
    copied_fields = ('seq_no', 'identifier', 'name', 'assessable',
                     'published', 'data', 'format')
    for worksheet in source.worksheets:
        clone = Worksheet()
        # Copy every content field; only the offering is re-targeted.
        for field in copied_fields:
            setattr(clone, field, getattr(worksheet, field))
        clone.offering = self
        Store.of(self).add(clone)
        ivle.worksheet.utils.update_exerciselist(clone)
def setUp(self):
    super(SchemaTest, self).setUp()
    self.database = create_database("sqlite:///%s" % self.makeFile())
    self.store = Store(self.database)
    self._package_dirs = set()
    self._package_names = set()
    self.package = self.create_package(self.makeDir(), "patch_package")
    import patch_package
    # A minimal one-table schema exercised by the tests.
    creates = ["CREATE TABLE person (id INTEGER, name TEXT)"]
    drops = ["DROP TABLE person"]
    deletes = ["DELETE FROM person"]
    self.schema = Schema(creates, drops, deletes, patch_package)
def new(cls, distribution, sourcepackagename,
        is_upstream_link_allowed=False):
    """Create a new DSP with the given parameters.

    Caches the `(distro_id, spn_id) --> dsp_id` mapping.
    """
    store = Store.of(distribution)
    dsp = DistributionSourcePackageInDatabase()
    dsp.distribution = distribution
    dsp.sourcepackagename = sourcepackagename
    dsp.is_upstream_link_allowed = is_upstream_link_allowed
    store.add(dsp)
    # Flush so dsp.id is assigned before it goes into the cache.
    store.flush()
    cls._cache[(distribution.id, sourcepackagename.id)] = dsp.id
    return dsp
def getSpecifications(self, user):
    """See `IMilestoneData`"""
    from lp.registry.model.person import Person
    origin = [Specification]
    product_origin, clauses = get_specification_active_product_filter(
        self)
    origin.extend(product_origin)
    clauses.extend(get_specification_privacy_filter(user))
    # LEFT JOIN so specifications without an assignee are kept.
    origin.append(LeftJoin(Person, Specification._assigneeID == Person.id))
    milestones = self._milestone_ids_expr(user)

    # A spec belongs here if it targets one of the milestones directly,
    # or via one of its non-deleted work items.
    results = Store.of(self.target).using(*origin).find(
        (Specification, Person),
        Specification.id.is_in(
            Union(
                Select(
                    Specification.id, tables=[Specification],
                    where=(Specification.milestoneID.is_in(milestones))),
                Select(
                    SpecificationWorkItem.specification_id,
                    tables=[SpecificationWorkItem],
                    where=And(
                        SpecificationWorkItem.milestone_id.is_in(
                            milestones),
                        SpecificationWorkItem.deleted == False)),
                all=True)),
        *clauses)
    ordered_results = results.order_by(
        Desc(Specification.priority), Specification.definition_status,
        Specification.implementation_status, Specification.title)
    ordered_results.config(distinct=True)
    # Person is fetched alongside each spec (likely to pre-warm the
    # assignee — confirm); the decorator strips it from the results.
    return DecoratedResultSet(ordered_results, itemgetter(0))
def vote(self, value, user_id): store = Store.of(self) # Checks if the user has already voted for this message. existing = self.votes.find(Vote.user_id == user_id).one() if existing is not None and existing.value == value: return # Vote already recorded (should I raise an exception?) if value not in (0, 1, -1): raise ValueError("A vote can only be +1 or -1 (or 0 to cancel)") # The vote can be added, changed or cancelled. Keep it simple and # delete likes and dislikes cached values. store.cache.delete_multi(( # this message's (dis)likes count str("list:%s:email:%s:likes" % (self.list_name, self.message_id)), str("list:%s:email:%s:dislikes" % (self.list_name, self.message_id)), # this thread (dis)likes count str("list:%s:thread:%s:likes" % (self.list_name, self.thread_id)), str("list:%s:thread:%s:dislikes" % (self.list_name, self.thread_id)), # the user's vote count on this list str("user:%s:list:%s:votes" % (user_id, self.list_name)), )) if existing is not None: # vote changed or cancelled if value == 0: store.remove(existing) else: existing.value = value else: # new vote if store.get(User, user_id) is None: store.add(User(user_id)) store.add(Vote(self.list_name, self.message_id, user_id, value))
def _get_milestones(self):
    """See `IHasMilestones`."""
    # NOTE: Storm needs the explicit `== True` comparison here.
    condition = And(self._getMilestoneCondition(),
                    Milestone.active == True)
    found = Store.of(self).find(Milestone, condition)
    return found.order_by(self._milestone_order)
def submit(self, principal, path, revision, who, late=False):
    """Submit a Subversion path and revision to a project.

    @param principal: The owner of the Subversion repository, and the
        entity on behalf of whom the submission is being made
    @param path: A path within that repository to submit.
    @param revision: The revision of that path to submit.
    @param who: The user who is actually making the submission.
    @param late: If True, will not raise a DeadlinePassed exception even
        after the deadline. (Default False.)
    """
    if not self.can_submit(principal, who, late=late):
        raise DeadlinePassed()

    a = Assessed.get(Store.of(self), principal, self)
    ps = ProjectSubmission()
    # Raise SubmissionError if the path is illegal
    ps.path = ProjectSubmission.test_and_normalise_path(path)
    ps.revision = revision
    ps.date_submitted = datetime.datetime.now()
    ps.assessed = a
    ps.submitter = who

    return ps