def prologue(self):
    """Test prologue: raise verbosity, emit a warning, then crash the process with SIGSEGV."""
    import os
    import signal
    message.message.verbosity(message.INT_DEBUG)
    message.warning('a warning %(c)d', c=666)
    message.debug(str(os.getpid()))
    # deliberately segfault this process to exercise crash handling
    os.kill(os.getpid(), signal.SIGSEGV)
def prologue(self):
    """Test prologue: raise verbosity, emit a warning, then crash the process with SIGSEGV."""
    import os
    import signal
    message.message.verbosity(message.INT_DEBUG)
    message.warning('a warning %(c)d', c=666)
    message.debug(str(os.getpid()))
    # deliberately segfault this process to exercise crash handling
    os.kill(os.getpid(), signal.SIGSEGV)
def close(self):
    """Flush the buffered coverage data to the database via a bulk INSERT.

    Rows in self.data go into the 'goal' table for a reference run, else
    the 'hits' table. Logs row counts and any server-side warnings.
    """
    table = 'goal' if self.reference else 'hits'
    if len(self.data) == 0:
        message.note('No data to upload into table "%(table)s", skipping', table=table)
        return
    message.information('starting data upload to table "%(table)s" via insert', table=table)
    with mdb.mdb.cursor() as cursor:
        rows = cursor.executemany(
            'INSERT INTO ' + table + ' (log_id, bucket_id, ' + table + ') VALUES (%s, %s, %s);',
            self.data)
        warnings = cursor.warning_count()
        if warnings:
            # bug fix: format spec was '%(warnings)' with no conversion type,
            # which raises ValueError when the message is rendered
            message.warning('upload to db via insert with %(warnings)d', warnings=warnings)
        if rows is None:
            message.warning('upload to db via insert "%(table)s" returned None', table=table)
        else:
            message.information(
                'upload to db via insert added %(rows)d rows of %(data)d to "%(table)s"',
                rows=int(rows), data=len(self.data), table=table)
def get_all_assets(self):
    """Collect the CSS and JS assets configured in the [Framework] section.

    Combines user_css/user_js entries with the assets of every lib listed in
    'libs' (including transitive dependencies). Returns a (css, js) pair of
    sets of stripped, non-empty asset names.
    """
    css, js = [], []
    parser = self._get_parser()
    section = 'Framework'
    if parser.has_option(section, 'user_css'):
        raw = parser.get(section, 'user_css')
        css = raw.split(' ') if raw else []
    if parser.has_option(section, 'user_js'):
        raw = parser.get(section, 'user_js')
        js = raw.split(' ') if raw else []
    if parser.has_option(section, 'libs'):
        raw = parser.get(section, 'libs')
        if raw:
            libs = set(item.strip() for item in raw.split(' ') if item.strip())
            dependencies = set()
            for lib in libs:
                self.get_all_dependencies(lib, dependencies)
            for name in dependencies:
                if name not in self.libs:
                    warning("%s lib doesn't exist!" % name)
                    continue
                group = self.libs[name]
                if 'js' in group:
                    js += group['js']
                if 'css' in group:
                    css += group['css']
    js = set(item.strip() for item in js if item.strip())
    css = set(item.strip() for item in css if item.strip())
    return (css, js)
def connect(self, *args, **kwargs):
    """Open a MySQL connection (autocommit off) and cache it under this thread's id."""
    self.db = kwargs['db'] if 'db' in kwargs else self.default_db
    try:
        instance = MySQLdb.connect(host=self.default_host,
                                   port=self.default_port,
                                   db=self.default_db,
                                   user=self.default_user,
                                   passwd=self.default_passwd)
        instance.autocommit(False)
    except:
        message.warning(
            'Unable to connect to mysql db %(db)s at %(host)s:%(port)d because %(exc)s',
            db=self.db, host=self.default_host, port=self.default_port,
            exc=sys.exc_info()[0])
        return
    message.note("Connected to mysql db %(db)s at %(host)s:%(port)d for %(thread)s",
                 db=self.default_db, host=self.default_host,
                 port=self.default_port, thread=self.id())
    # this should be keyed on db too - but we don't used multiple databases currently
    self.instance[self.id()] = instance
def success(self):
    """Generic success hook: record success and invoke SUCCESS()."""
    previous = self.is_success
    if previous is not None:
        # a status was already recorded - flag the late success() call
        message.warning('success() called after test issue (status : %(status)s)',
                        status=str(previous))
    self.is_success = True
    self.SUCCESS()
def coverage(self, pad=True, cursor=True):
    """Yield one row per bucket from self.query; when pad is true, keep
    yielding empty dicts (warning each time) after the real rows run out."""
    conn = mdb.connection()
    ctx = conn.row_cursor() if cursor else conn.cursor()
    with ctx as db:
        db.execute(self.query % self.__dict__)
        for row in db.fetchall():
            yield row
        while pad:
            message.warning('missing bucket')
            yield {}
def plusarg_opt_int(cls, name, default, fmt='08x'):
    """To get default/command line options.

    Looks up 'name' in the plusargs, parsing the value as an integer in any
    base (int(..., 0)); falls back to 'default' with a warning on failure.
    """
    try:
        result = int(plusargs().get(name, str(default)), 0)
    except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
        message.warning(str(sys.exc_info()))
        result = default
    message.information('Using %(result)' + fmt + ' for option "%(name)s"',
                        result=result, name=name)
    return result
def coverage(self, pad=True, cursor=True):
    """Generator over bucket rows for self.query; with pad set, pads the
    stream indefinitely with {} (one warning per pad) once rows are exhausted."""
    connection = mdb.connection()
    chosen = connection.row_cursor() if cursor else connection.cursor()
    with chosen as db:
        db.execute(self.query % self.__dict__)
        for record in db.fetchall():
            yield record
        while pad:
            message.warning('missing bucket')
            yield {}
def number_of_axes(self, max_axes):
    """Clamp max_axes to the range [1, len(self.NAMES)] and return it plus one.

    A warning is emitted when max_axes exceeds the number of available names.
    """
    limit = len(self.NAMES)
    if max_axes > limit:
        message.warning('limit of %(limit)d axes', limit=limit)
        clamped = limit
    elif max_axes < 1:
        clamped = 1
    else:
        clamped = max_axes
    return clamped + 1
def number_of_axes(self, max_axes):
    """Return max_axes + 1, after clamping max_axes into [1, len(self.NAMES)].

    Exceeding the upper bound logs a warning and uses the bound instead.
    """
    upper = len(self.NAMES)
    result = max_axes
    if result < 1:
        result = 1
    elif result > upper:
        message.warning('limit of %(limit)d axes', limit=upper)
        result = upper
    return result + 1
def summary(self, verbose=False):
    """Summarise regression results, warning once for each non-PASS test.

    Returns the summary result object from the regression database.
    """
    is_root = mdb_conn.is_root()
    results = database.rgr().result(mdb_conn.log_id, is_root)
    result = results.summary()
    if result.passes != result.total:
        if is_root or verbose:
            for test in results[1:]:  # drop this
                # bug fix: 'is not' compares object identity; it only worked
                # by accident via string interning - use != for equality
                if test.status.status != 'PASS':
                    message.warning("[%(log_id)d, %(status)s] %(reason)s",
                                    log_id=test.log.log_id, **test.status)
    result.summary()
    return result
def plusarg_opt_int(cls, name, default, fmt='08x'):
    """To get default/command line options.

    Parses the plusarg 'name' as an integer (any base via int(..., 0)),
    falling back to 'default' with a warning if parsing fails.
    """
    try:
        result = int(plusargs().get(name, str(default)), 0)
    except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
        message.warning(str(sys.exc_info()))
        result = default
    message.information('Using %(result)' + fmt + ' for option "%(name)s"',
                        result=result, name=name)
    return result
def connect(self, *args, **kwargs):
    """Open (or create) the sqlite database file and cache the connection
    under this thread's id.

    Enables WAL journalling and read-uncommitted for concurrent access.
    """
    self.db = kwargs.get('db', self.default_db)
    try:
        instance = sqlite3.connect(self.db)
    except Exception:  # narrowed from a bare except
        message.warning('Unable to connect. File %(db)s because %(exc)s',
                        db=self.db, exc=sys.exc_info()[0])
        # bug fix: previously fell through and raised UnboundLocalError on
        # 'instance'; bail out like the MySQL connect() variant does
        return
    instance.execute('PRAGMA journal_mode=WAL;')
    instance.execute('PRAGMA read_uncommitted = 1;')
    self.instance[self.id()] = instance
def enqueue(cmd):
    'just execute here'
    # Runs the command synchronously rather than queueing it externally.
    # NOTE(review): references self.mdb although 'self' is not a parameter -
    # presumably this def is nested inside a method and closes over self;
    # confirm against the enclosing context.
    message.debug('enqueue %(cmd)s', cmd=cmd)
    # Child inherits our environment plus an MDB spec carrying this run's
    # root/parent log ids; wait() blocks until the child exits.
    result = subprocess.Popen(
        cmd.split(' '),
        env=dict(os.environ,
                 MDB='root=' + str(self.mdb.get_root()) + ',parent=' + str(self.mdb.log_id))).wait()
    if result > 0:
        message.warning('process %(cmd)s returned non zero %(result)d',
                        cmd=cmd, result=result)
def summary(self, verbose=False):
    """Summarise regression results, emitting a warning per failing test.

    Returns the summary result object from the regression database.
    """
    is_root = mdb_conn.is_root()
    results = database.rgr().result(mdb_conn.log_id, is_root)
    result = results.summary()
    if result.passes != result.total:
        if is_root or verbose:
            for test in results[1:]:  # drop this
                # bug fix: 'is not' is identity comparison, not equality;
                # relying on string interning is fragile - use !=
                if test.status.status != 'PASS':
                    message.warning("[%(log_id)d, %(status)s] %(reason)s",
                                    log_id=test.log.log_id, **test.status)
    result.summary()
    return result
def __init__(self, log_ids=[], test_ids=[], xml=None, threshold=0, robust=False, previous=None):
    'log_ids is a list of regression roots'
    # NOTE(review): mutable default arguments ([]) are shared across calls;
    # harmless here since they are only read, but worth tightening.
    self.log_ids = log_ids
    s_log_ids = ','.join(map(str, log_ids))
    self.tests = mdb.connection().row_cursor()
    if log_ids:
        # create table of individual runs, but not root node as this may have already summarised coverage
        self.tests.execute('CREATE TEMPORARY TABLE '+self.invs+' AS SELECT l1.*, goal_id AS master FROM log AS l0 JOIN log AS l1 ON (l0.log_id = l1.root) LEFT OUTER JOIN master ON (l1.log_id = master.log_id) WHERE l1.root IN ('+s_log_ids+');')
        self.tests.execute('SELECT count(*) AS children FROM '+self.invs)
        children = self.tests.fetchone().children
        if children:
            message.information('%(log_ids)s %(has)s %(children)d children', log_ids=s_log_ids, children=children, has='have' if len(log_ids) > 1 else 'has')
    # append individual runs as given by test_ids
    if xml:
        xml_ids = xml.xml.xpath('/optimize/test/log_id/text()')
    else:
        xml_ids = []
    if test_ids or xml_ids:
        s_test_ids = ','.join(map(str, test_ids+xml_ids))
        # extend the temporary table if log_ids already created it, else create it now
        create = ('INSERT INTO '+self.invs) if log_ids else ('CREATE TEMPORARY TABLE '+self.invs+' AS')
        self.tests.execute(create+' SELECT log.*, IFNULL(goal_id, goal.log_id) AS master FROM log LEFT OUTER JOIN master ON (log.log_id = master.log_id) LEFT OUTER JOIN goal ON (log.log_id = goal.log_id) WHERE log.log_id IN ('+s_test_ids+') GROUP BY log_id;')
    self.tests.execute('SELECT count(*) AS tests FROM '+self.invs)
    tests = self.tests.fetchone().tests
    if tests < 1:
        message.fatal('no tests')
    message.information('starting with %(count)d tests in table %(table)s', count=tests, table=self.invs)
    # check congruency
    self.cvg = mdb.connection().row_cursor()
    rows = self.cvg.execute("SELECT md5_self AS md5, 'md5_self' AS type, invs.master, invs.root FROM point JOIN "+self.invs+" AS invs ON (invs.master = point.log_id AND point.parent IS NULL) GROUP BY md5;")
    md5 = self.cvg.fetchall()
    if not md5:
        message.fatal('no master')
    elif len(md5) > 1:
        # multiple distinct md5_self values means the runs do not share a coverage master
        message.fatal('md5 of multiple masters do not match')
    else:
        message.debug('md5 query returns %(rows)d', rows=rows)
    self.master = mdb.accessor(md5=md5[0])
    self.cvg.execute("SELECT DISTINCT(md5_axes) AS md5, 'md5_axes' AS type, invs.master, invs.root FROM point JOIN "+self.invs+" AS invs ON (invs.master = point.log_id AND point.parent IS NULL) GROUP BY md5;")
    md5 = self.cvg.fetchall()
    if len(md5) > 1:
        message.fatal('md5 of multiple axis masters do not match')
    self.master.axes = md5[0]
    # create status table, collating goal & hits
    self.cvg.execute('CREATE TEMPORARY TABLE '+self.covg+' (bucket_id INTEGER NOT NULL PRIMARY KEY, goal INTEGER, hits INTEGER, total_hits INTEGER, rhits INTEGER, max_hits INTEGER, tests INTEGER);')
    try:
        self.threshold = float(threshold)
    except:
        # fall back to 0.0 when the supplied threshold is not float-convertible
        self.threshold = 0.0
        message.warning('cannot convert threshold value given "%(arg)s" to float because %(exception)s, using %(threshold)2.1f', arg=threshold, exception=sys.exc_info()[0], threshold=self.threshold)
    self.robust = robust
    self.previous = previous
def retry(self, fn0, fn1=None):
    """Invoke fn0, retrying up to connection.RETRIES times when the MySQL
    server connection has gone away; fn1 (if given) runs after each reconnect."""
    for _ in range(0, connection.RETRIES):
        try:
            return fn0()
        except MySQLdb.OperationalError:
            gone_away = sys.exc_info()[1].args == (2006, 'MySQL server has gone away')
            if not gone_away:
                raise
            message.warning('MySQL connection lost; retrying')
            self.reconnect()
            if fn1:
                fn1()
    message.warning('retried %(n)d times', n=connection.RETRIES)
    raise
def enqueue(self, cmd):
    'just execute here'
    # Run the command synchronously with an MDB spec and extended PYTHONPATH.
    message.debug('enqueue %(cmd)s', cmd=cmd)
    mdb_spec = 'root=' + str(mdb_conn.get_root()) + ',parent=' + str(mdb_conn.log_id)
    child_env = dict(os.environ,
                     MDB=mdb_spec,
                     PYTHONPATH=os.environ['PYTHONPATH'] + ':../python')
    status = subprocess.Popen(cmd.split(' '), env=child_env).wait()
    if status > 0:
        message.warning('process %(cmd)s returned non zero %(result)d',
                        cmd=cmd, result=status)
def cb_fn(self):
    'As callback executes vpi_get_value, could change callback fn and cb_filter signature to fn(value)'
    # Count every invocation; skip the registered functions when filtered.
    self.cnt += 1
    if self.cb_filter():
        self.filtered += 1
        return
    for func in self.funcs:
        try:
            func()
        except Exception as exc:  # bug fix: 'except Exception, exc' is Python-2-only syntax
            self.excepted += 1
            message.error('Exception in callback "%(name)s" : %(exc)s',
                          exc=str(exc), name=self.name)
            # log the full traceback line by line at warning level
            for detail in traceback.format_exc().strip('\n').split('\n'):
                message.warning(detail)
def connect(self, *args, **kwargs):
    """Open (or create) the sqlite database file and cache the connection
    keyed by this thread's id.

    Turns on WAL journalling and read-uncommitted for concurrent readers.
    """
    self.db = kwargs.get('db', self.default_db)
    try:
        instance = sqlite3.connect(self.db)
    except Exception:  # narrowed from a bare except
        message.warning('Unable to connect. File %(db)s because %(exc)s',
                        db=self.db, exc=sys.exc_info()[0])
        # bug fix: without this return the next line raised UnboundLocalError
        # on 'instance', masking the real connection failure
        return
    instance.execute('PRAGMA journal_mode=WAL;')
    instance.execute('PRAGMA read_uncommitted = 1;')
    self.instance[self.id()] = instance
def retry(self, fn0, fn1=None):
    """Call fn0 with retry-on-lost-connection semantics.

    On MySQL error 2006 the connection is re-established (running fn1 if
    supplied) and fn0 retried, up to connection.RETRIES attempts; any other
    operational error propagates immediately.
    """
    attempts = connection.RETRIES
    for _ in range(0, attempts):
        try:
            return fn0()
        except MySQLdb.OperationalError:
            if sys.exc_info()[1].args != (2006, 'MySQL server has gone away'):
                raise
            message.warning('MySQL connection lost; retrying')
            self.reconnect()
            if fn1:
                fn1()
    message.warning('retried %(n)d times', n=connection.RETRIES)
    raise
def close(self):
    """Upload the buffered coverage rows to the database in one bulk INSERT.

    Target table is 'goal' for a reference run, otherwise 'hits'. Logs
    progress, row counts, and any server-side warnings.
    """
    table = 'goal' if self.reference else 'hits'
    if len(self.data) == 0:
        message.note('No data to upload into table "%(table)s", skipping', table=table)
        return
    message.information('starting data upload to table "%(table)s" via insert', table=table)
    with mdb.mdb.cursor() as cursor:
        rows = cursor.executemany(
            'INSERT INTO ' + table + ' (log_id, bucket_id, ' + table + ') VALUES (%s, %s, %s);',
            self.data)
        warnings = cursor.warning_count()
        if warnings:
            # bug fix: '%(warnings)' lacked a conversion type and raised
            # ValueError when the warning was rendered
            message.warning('upload to db via insert with %(warnings)d', warnings=warnings)
        if rows is None:
            message.warning('upload to db via insert "%(table)s" returned None', table=table)
        else:
            message.information(
                'upload to db via insert added %(rows)d rows of %(data)d to "%(table)s"',
                rows=int(rows), data=len(self.data), table=table)
def connect(self, *args, **kwargs):
    """Establish a MySQL connection with autocommit disabled and cache it
    per thread id; connection failure is logged and swallowed."""
    self.db = kwargs['db'] if 'db' in kwargs else self.default_db
    try:
        conn = MySQLdb.connect(host=self.default_host,
                               port=self.default_port,
                               db=self.default_db,
                               user=self.default_user,
                               passwd=self.default_passwd)
        conn.autocommit(False)
    except:
        message.warning(
            'Unable to connect to mysql db %(db)s at %(host)s:%(port)d because %(exc)s',
            db=self.db, host=self.default_host, port=self.default_port,
            exc=sys.exc_info()[0])
        return
    message.note("Connected to mysql db %(db)s at %(host)s:%(port)d for %(thread)s",
                 db=self.default_db, host=self.default_host,
                 port=self.default_port, thread=self.id())
    # this should be keyed on db too - but we don't used multiple databases currently
    self.instance[self.id()] = conn
def _set(paths):
    """
    Loads a Set from one or more YAML files.

    Parameters
        paths
            iterable having the file paths to be loaded.

    Raises
        IOError
            if one of the files could not be read.
        YAMLError
            if one of the files is not a valid YAML file.
        validate.SemanticError
            if one of the files is semantically invalid.

    Returns
        Set containing all units found in the specified files.
    """
    units = Set()
    groups = {}
    virtuals = {}
    registry = Registry()
    for path in paths:
        # parse and validate; these except clauses merely re-raise to make
        # the documented failure modes explicit
        try:
            definition = yaml(path)
            validate.set(definition)
        except IOError:
            raise
        except YAMLError:
            raise
        except validate.SemanticError:
            raise
        # repository name is taken from the path's third-from-last
        # alphanumeric component - assumes the on-disk cache layout
        repository = findall('([a-zA-Z0-9]+)', path)[-3]
        for name in definition.iterkeys():
            for version in definition[name].iterkeys():
                for architecture in definition[name][version].iterkeys():
                    data = definition[name][version][architecture]
                    package = Package(name, version, architecture, repository, data)
                    # register the package; on any name clash, warn and skip
                    # the remaining architectures of this version
                    try:
                        registry.add_package(name, version, architecture)
                    except PackageInRegistry:
                        warning("duplicate package found: {0}:{1} {2} from repository '{3}'. Ignoring.".format(name, architecture, version, repository))
                        break
                    except GroupInRegistry:
                        warning("name conflict between group {0} from repository '{3}' and package {0}:{1} {2}. Ignoring.".format(name, architecture, version, repository))
                        break
                    except VirtualPackageInRegistry:
                        warning("name conflict between virtual package {0} from repository '{3}' and package {0}:{1} {2}. Ignoring.".format(name, architecture, version, repository))
                        break
                    if data['provides'] is not None:
                        # record each virtual package this package provides
                        for virtual in data['provides']:
                            try:
                                registry.add_virtual(virtual)
                            except PackageInRegistry:
                                warning("name conflict between virtual package {0} from repository '{3}' and package {0}:{1} {2}. Ignoring.".format(name, architecture, version, repository))
                                break
                            except GroupInRegistry:
                                warning("name conflict between virtual package {0} from repository '{1}' and group {0}. Ignoring.".format(virtual, repository))
                                break
                            try:
                                virtuals[virtual].provided_by(package)
                            except KeyError:
                                virtuals[virtual] = VirtualPackage(virtual)
                                virtuals[virtual].provided_by(package)
                    if data['groups'] is not None:
                        # attach the package to each of its groups
                        for group in data['groups']:
                            try:
                                registry.add_group(group)
                            except VirtualPackageInRegistry:
                                warning("name conflict between group {0} from repository '{1}' and virtual package {0}. Ignoring.".format(group, repository))
                                break
                            except PackageInRegistry:
                                warning("name conflict between group {0} from repository '{3}' and package {0}:{1} {2}. Ignoring.".format(name, architecture, version, repository))
                                break
                            try:
                                groups[group].add(package)
                            except KeyError:
                                groups[group] = Group(group)
                                groups[group].add(package)
                    units.add(package)
    for group in groups.iterkeys():
        units.add(groups[group])
    for virtual in virtuals.iterkeys():
        units.add(virtuals[virtual])
    return units
def incr(self, hits=1):
    """Increment the bucket selected by the current axis values by 'hits'.

    Unregistered axis combinations produce a warning instead of raising.
    """
    try:
        bucket = self.point.buckets[self.bucket_id()]
        bucket.incr(hits)
    except axisValueError:
        message.warning('increment is not registered')
def prologue(self):
    """Default prologue hook; subclasses are expected to override this."""
    message.warning('no prologue defined')
def success(self):
    """Generic success hook: mark the test successful and call SUCCESS()."""
    if self.is_success is None:
        pass
    else:
        # success() arriving after a recorded issue deserves a warning
        message.warning('success() called after test issue (status : %(status)s)',
                        status=str(self.is_success))
    self.is_success = True
    self.SUCCESS()
def prologue(self):
    """Fallback prologue hook - warns that no prologue was provided."""
    message.warning('no prologue defined')
def epilogue(self):
    """Default epilogue hook; subclasses are expected to override this."""
    message.warning('no epilogue defined')
def _install(configuration, installed, package, filepath):
    """
    Performs a low-level package installation.

    Parameters
        configuration
            a valid Craft Configuration object.
        installed
            Set having all currently installed units on the system.
        package
            the Package unit to be installed.
        filepath
            absolute filesystem path of the package's archive to be installed.

    Raises
        InstallError
            if any error occurs during the installation.
        OSError
            if, in case an operation has failed, it is not possible to
            cleanly recover from it.

    Returns
        True if the installation was successfully completed.
    """
    architecture = package.architecture
    name = package.name
    version = package.version
    db = configuration.db()
    # per-package metadata directory inside the craft database
    package_directory = db + 'installed/' + name + '/' + version + '/' + architecture
    craft_directories = [
        db + 'installed/',
        db + 'installed/' + name,
        db + 'installed/' + name + '/' + version
    ]
    for each in craft_directories:
        try:
            mkdir(each)
        except OSError:
            # directory already exists
            pass
    if package in installed:
        message.warning("'{0}' is already installed. Aborting...".format(package))
        raise InstallError(package)
    try:
        mkdir(package_directory)
    except OSError:
        message.warning("failed to create internal directory while installing '{0}'. Aborting...".format(package))
        raise InstallError(package)
    try:
        chdir(package_directory)
    except OSError:
        message.warning("could not access the directory belonging to package '{0}'. Aborting...".format(package))
        raise InstallError(package)
    # verify the archive against its recorded sha1, when one is present
    sha1 = package.has_checksum('sha1')
    if sha1:
        if not filepath:
            message.warning("missing archive filepath for package '{0}'. Aborting...".format(package))
            raise InstallError(package)
        if not checksum.sha1(filepath, sha1):
            message.warning("inconsistent archive provided for package '{0}'. Aborting...".format(package))
            try:
                rmtree(package_directory)
            except OSError:
                raise
            raise InstallError(package)
    package_files = archive.getfiles(filepath)
    if not package_files:
        message.warning("empty archive provided for package '{0}'. Aborting...".format(package))
        try:
            rmtree(package_directory)
        except OSError:
            raise
        raise InstallError(package)
    # persist the file list so uninstallation knows what to remove
    try:
        package_files_dump_handle = open('files', 'w')
    except IOError:
        message.warning("could not write the file list for package '{0}'. Aborting...".format(package))
        try:
            rmtree(package_directory)
        except OSError:
            raise
        raise InstallError(package)
    else:
        for each in package_files:
            package_files_dump_handle.write(each + '\n')
        package_files_dump_handle.close()
    if not archive.extract(filepath, configuration.root()):
        message.warning("could not extract the archive provided for package '{0}'. Aborting...".format(package))
        try:
            rmtree(package_directory)
        except OSError:
            raise
        raise InstallError(package)
    try:
        if not dump.package(package, 'metadata.yml'):
            message.warning("failed to write metadata.yml for package '{0}'. Aborting...".format(package))
            raise InstallError(package)
    except IOError:
        raise
    installed.add(package)
    return True
def download(configuration, packages):
    """
    Download packages.

    Parameters
        configuration
            a valid Craft Configuration object.
        packages
            an iterable having the Package units to be downloaded.

    Raises
        RepositoryError
            in case an invalid repository was specified.
        DownloadError
            in case of failure.

    Returns
        True in case all specified packages have been successfully downloaded.
    """
    grouped_packages = {}
    db = configuration.db()
    repositories = configuration.repositories()
    packages = list(packages)
    # drop packages whose archive is already present in the local cache
    for package in list(packages):
        if isfile(db+'/available/'+package.repository+'/cache/'+package.name+'/'+package.version+'/'+package.architecture+'/package.tar.gz'):
            packages.remove(package)
    # group the remaining packages by originating repository
    for package in packages:
        try:
            grouped_packages[package.repository].append(package)
        except KeyError:
            grouped_packages[package.repository] = []
            grouped_packages[package.repository].append(package)
    for repository_name in grouped_packages.iterkeys():
        try:
            repository = repositories[repository_name]
        except KeyError:
            raise RepositoryError(repository_name)
        # merge any repository-specific environment variables before fetching
        try:
            environment.merge(repository['env'])
        except environment.EnvironmentError:
            message.warning("could not merge the environment variables associated to the repository '{0}'!".format(repository_name))
        except KeyError:
            pass
        for package in grouped_packages[repository_name]:
            # only checksummed packages have a downloadable archive
            if package.has_checksum():
                n = package.name
                v = package.version
                a = package.architecture
                directories = [
                    db+'/available',
                    db+'/available/'+repository_name,
                    db+'/available/'+repository_name+'/cache',
                    db+'/available/'+repository_name+'/cache/'+n,
                    db+'/available/'+repository_name+'/cache/'+n+'/'+v,
                    db+'/available/'+repository_name+'/cache/'+n+'/'+v+'/'+a
                ]
                for directory in directories:
                    try:
                        mkdir(directory)
                    except OSError:
                        # directory already exists
                        pass
                try:
                    chdir(db+'/available/'+package.repository+'/cache/'+n+'/'+v+'/'+a)
                except OSError:
                    raise DownloadError(package)
                if not isfile('package.tar.gz'):
                    # delegate the actual transfer to the repository's handler command
                    handler = repository['handler']
                    target = "{0}/{1}/{2}/{3}/package.tar.gz".format(repository['target'], n, v, a)
                    if system(handler+' '+target) != 0:
                        raise DownloadError(package)
        # undo the repository-specific environment changes
        try:
            environment.purge(repository['env'].keys())
        except environment.EnvironmentError:
            message.warning("could not purge the environment variables associated to the repository '{0}'!".format(package.repository))
        except KeyError:
            pass
    return True
def set_default_db(cls, **args):
    """Warn that switching the default database is not supported on MySQL."""
    message.warning('set default db on mysql')
def __exit__(self, exc_type, exc_value, traceback):
    """Context-manager exit: close the cursor, demoting close() failures to warnings."""
    try:
        self.close()
    except:
        message.warning('cursor close raised exception %(exc)s',
                        exc=sys.exc_info()[1])
def _set(paths):
    """
    Loads a Set from one or more YAML files.

    Parameters
        paths
            iterable having the file paths to be loaded.

    Raises
        IOError
            if one of the files could not be read.
        YAMLError
            if one of the files is not a valid YAML file.
        validate.SemanticError
            if one of the files is semantically invalid.

    Returns
        Set containing all units found in the specified files.
    """
    units = Set()
    groups = {}
    virtuals = {}
    registry = Registry()
    for path in paths:
        # parse and validate; these clauses simply re-raise the documented errors
        try:
            definition = yaml(path)
            validate.set(definition)
        except IOError:
            raise
        except YAMLError:
            raise
        except validate.SemanticError:
            raise
        # repository name is the third-from-last alphanumeric path component
        repository = findall('([a-zA-Z0-9]+)', path)[-3]
        for name in definition.iterkeys():
            for version in definition[name].iterkeys():
                for architecture in definition[name][version].iterkeys():
                    data = definition[name][version][architecture]
                    package = Package(name, version, architecture, repository, data)
                    # register the package; any name clash warns and skips the
                    # remaining architectures of this version
                    try:
                        registry.add_package(name, version, architecture)
                    except PackageInRegistry:
                        warning("duplicate package found: {0}:{1} {2} from repository '{3}'. Ignoring.".format(name, architecture, version, repository))
                        break
                    except GroupInRegistry:
                        warning("name conflict between group {0} from repository '{3}' and package {0}:{1} {2}. Ignoring.".format(name, architecture, version, repository))
                        break
                    except VirtualPackageInRegistry:
                        warning("name conflict between virtual package {0} from repository '{3}' and package {0}:{1} {2}. Ignoring.".format(name, architecture, version, repository))
                        break
                    if data['provides'] is not None:
                        # record each virtual package this package provides
                        for virtual in data['provides']:
                            try:
                                registry.add_virtual(virtual)
                            except PackageInRegistry:
                                warning("name conflict between virtual package {0} from repository '{3}' and package {0}:{1} {2}. Ignoring.".format(name, architecture, version, repository))
                                break
                            except GroupInRegistry:
                                warning("name conflict between virtual package {0} from repository '{1}' and group {0}. Ignoring.".format(virtual, repository))
                                break
                            try:
                                virtuals[virtual].provided_by(package)
                            except KeyError:
                                virtuals[virtual] = VirtualPackage(virtual)
                                virtuals[virtual].provided_by(package)
                    if data['groups'] is not None:
                        # attach the package to each of its groups
                        for group in data['groups']:
                            try:
                                registry.add_group(group)
                            except VirtualPackageInRegistry:
                                warning("name conflict between group {0} from repository '{1}' and virtual package {0}. Ignoring.".format(group, repository))
                                break
                            except PackageInRegistry:
                                warning("name conflict between group {0} from repository '{3}' and package {0}:{1} {2}. Ignoring.".format(name, architecture, version, repository))
                                break
                            try:
                                groups[group].add(package)
                            except KeyError:
                                groups[group] = Group(group)
                                groups[group].add(package)
                    units.add(package)
    for group in groups.iterkeys():
        units.add(groups[group])
    for virtual in virtuals.iterkeys():
        units.add(virtuals[virtual])
    return units
def set_default_db(cls, **args):
    """No-op on MySQL: changing the default database is unsupported; just warn."""
    message.warning('set default db on mysql')
def _uninstall(configuration, installed, package, keep_static):
    """
    Performs a low-level package uninstallation.

    Parameters
        configuration
            a valid Craft Configuration object.
        installed
            Set having all currently installed units on the system.
        package
            the Package unit to be uninstalled.
        keep_static
            specifies whether the package's static files must be preserved or not.

    Raises
        UninstallError
            if any error occurs during the uninstallation.

    Returns
        True if the uninstallation was successfully completed.
    """
    architecture = package.architecture
    name = package.name
    version = package.version
    db = configuration.db()
    root = configuration.root()
    if package not in installed:
        message.warning("'{0}' is not installed. Aborting...".format(package))
        raise UninstallError(package)
    try:
        chdir(db + 'installed/' + name + '/' + version + '/' + architecture)
    except OSError:
        message.warning("could not access the directory belonging to package '{0}'.".format(package))
        raise UninstallError(package)
    # files belonging to the package, as recorded at install time
    package_files = []
    try:
        handle = open('files')
    except IOError:
        pass
    else:
        package_files = handle.read().splitlines()
        handle.close()
    # craft's own metadata files for this package
    craft_files = [
        db + 'installed/' + name + '/' + version + '/' + architecture + '/metadata.yml',
        db + 'installed/' + name + '/' + version + '/' + architecture + '/files',
        db + 'installed/' + name + '/' + version + '/' + architecture
    ]
    # fail fast if anything is not writable before removing any file
    for each in package_files:
        if not access(root + each, W_OK):
            message.warning("cannot remove file '{0}' from package '{1}'.".format(root + each, package))
            raise UninstallError(package)
    for each in craft_files:
        if isfile(each) or isdir(each):
            if not access(each, W_OK):
                message.warning("cannot remove file '{0}'.".format(each))
                raise UninstallError(package)
    if keep_static:
        # preserve configuration-like files under a .craft-old suffix
        for each in package.static():
            try:
                message.simple("Attempting to save '{0}' as '{1}'...".format(root + each, root + each + '.craft-old'))
                rename(root + each, root + each + '.craft-old')
            except OSError:
                message.simple("Could not preserve the following static file: '{0}'.".format(root + each))
                message.simple(" '{0}' may exist already.".format(root + each + '.craft-old'))
                pass
    # remove payload files then craft metadata, ignoring already-gone entries
    for each in package_files:
        try:
            if isdir(root + each):
                rmdir(root + each)
            elif isfile(root + each):
                remove(root + each)
        except OSError:
            pass
    for each in craft_files:
        try:
            if isdir(each):
                rmdir(each)
            elif isfile(each):
                remove(each)
        except OSError:
            pass
    # prune now-empty parent directories; stop at the first non-empty one
    try_to_remove = [
        db + 'installed/' + name + '/' + version,
        db + 'installed/' + name
    ]
    for each in try_to_remove:
        try:
            rmdir(each)
        except OSError:
            break
    installed.remove(package)
    return True
def sync(configuration):
    """
    Synchronises enabled repositories from a Craft configuration.

    Parameters
        configuration
            a valid Craft Configuration object.

    Raises
        SyncError
            in case of failure related to the actual synchronisation.
        ClearError
            if the internal clear() call fails and the previously set local
            repository cache is not properly cleared up prior to the actual
            synchronisation.

    Returns
        True in case the synchonisation has been successfully executed.
    """
    # drop any stale cached repository metadata first
    try:
        clear(configuration, False)
    except ClearError:
        raise
    for name in configuration.repositories().iterkeys():
        repository = configuration.repositories()[name]
        try:
            mkdir(configuration.db()+'available')
        except OSError:
            # directory already exists
            pass
        try:
            mkdir(configuration.db()+'available/'+name)
        except OSError:
            pass
        try:
            chdir(configuration.db()+'available/'+name)
        except OSError:
            raise SyncError
        # merge any repository-specific environment variables before fetching
        try:
            environment.merge(repository['env'])
        except environment.EnvironmentError:
            message.warning("could not merge the environment variables associated to the repository '{0}'!".format(name))
        except KeyError:
            pass
        handler = repository['handler']
        # fetch one metadata file per enabled architecture via the handler command
        for arch in configuration.architectures():
            target = repository['target']+'/'+arch+'.yml'
            if system(handler+' '+target) != 0:
                message.warning("could not synchronise architecture '{0}' from repository '{1}'!".format(arch, name))
        # undo the repository-specific environment changes
        try:
            environment.purge(repository['env'].keys())
        except environment.EnvironmentError:
            message.warning("could not purge the environment variables associated to the repository '{0}'!".format(name))
        except KeyError:
            pass
    return True
def traceback(self, _traceback):
    """Log every line of the formatted traceback object as a warning."""
    # local alias avoids clashing with this method's own name
    import traceback as tb_mod
    for frame_text in tb_mod.format_tb(_traceback):
        for line in frame_text.strip('\n').split('\n'):
            message.warning(line)
def prologue(self):
    """Test prologue: raise verbosity, log a warning and a note, then drop into pdb."""
    message.message.verbosity(message.INT_DEBUG)
    message.warning('a warning %(c)d', c=666)
    message.note('a note')
    self.pdb()
def enqueue(cmd):
    'just execute here'
    # Runs the command synchronously rather than queueing it externally.
    # NOTE(review): references self.mdb although 'self' is not a parameter -
    # presumably this def is nested inside a method and closes over self;
    # confirm against the enclosing context.
    message.debug('enqueue %(cmd)s', cmd=cmd)
    # Child inherits our environment plus an MDB spec carrying this run's
    # root/parent log ids; wait() blocks until the child exits.
    result = subprocess.Popen(cmd.split(' '), env=dict(os.environ, MDB='root='+str(self.mdb.get_root())+',parent='+str(self.mdb.log_id))).wait()
    if result > 0:
        message.warning('process %(cmd)s returned non zero %(result)d', cmd=cmd, result=result)
def ignoring(arg):
    """Emit a warning that 'arg' is being ignored."""
    message.warning('Ignoring %(arg)s', arg=arg)
def epilogue(self):
    """Fallback epilogue hook - warns that no epilogue was provided."""
    message.warning('no epilogue defined')
def sync(configuration):
    """
    Synchronises enabled repositories from a Craft configuration.

    Parameters
        configuration
            a valid Craft Configuration object.

    Raises
        SyncError
            in case of failure related to the actual synchronisation.
        ClearError
            if the internal clear() call fails and the previously set local
            repository cache is not properly cleared up prior to the actual
            synchronisation.

    Returns
        True in case the synchonisation has been successfully executed.
    """
    # drop any stale cached repository metadata first
    try:
        clear(configuration, False)
    except ClearError:
        raise
    for name in configuration.repositories().iterkeys():
        repository = configuration.repositories()[name]
        try:
            mkdir(configuration.db() + 'available')
        except OSError:
            # directory already exists
            pass
        try:
            mkdir(configuration.db() + 'available/' + name)
        except OSError:
            pass
        try:
            chdir(configuration.db() + 'available/' + name)
        except OSError:
            raise SyncError
        # merge any repository-specific environment variables before fetching
        try:
            environment.merge(repository['env'])
        except environment.EnvironmentError:
            message.warning("could not merge the environment variables associated to the repository '{0}'!".format(name))
        except KeyError:
            pass
        handler = repository['handler']
        # fetch one metadata file per enabled architecture via the handler command
        for arch in configuration.architectures():
            target = repository['target'] + '/' + arch + '.yml'
            if system(handler + ' ' + target) != 0:
                message.warning("could not synchronise architecture '{0}' from repository '{1}'!".format(arch, name))
        # undo the repository-specific environment changes
        try:
            environment.purge(repository['env'].keys())
        except environment.EnvironmentError:
            message.warning("could not purge the environment variables associated to the repository '{0}'!".format(name))
        except KeyError:
            pass
    return True
def traceback(self, _traceback):
    """Emit each line of the formatted traceback object as a separate warning."""
    import traceback as tb
    for chunk in tb.format_tb(_traceback):
        for line in chunk.strip('\n').split('\n'):
            message.warning(line)
def prologue(self):
    """Test prologue: raise verbosity, call ignore(), then log a warning and a note."""
    message.message.verbosity(message.INT_DEBUG)
    ignore()
    message.warning('a warning %(c)d', c=666)
    message.note('a note')
def hit(self):
    """Record a single hit on the bucket selected by the current axis values.

    Unregistered axis combinations produce a warning instead of raising.
    """
    try:
        target = self.point.buckets[self.bucket_id()]
        target.hit()
    except axisValueError:
        message.warning('hit is not registered')
def download(configuration, packages):
    """
    Download packages.

    Parameters
        configuration
            a valid Craft Configuration object.
        packages
            an iterable having the Package units to be downloaded.
    Raises
        RepositoryError
            in case an invalid repository was specified.
        DownloadError
            in case of failure.
    Returns
        True in case all specified packages have been successfully downloaded.
    """
    db = configuration.db()
    repositories = configuration.repositories()
    # Keep only the packages whose archive is not already in the local cache
    # (replaces the original iterate-a-copy-and-remove loop).
    packages = [
        package for package in packages
        if not isfile(db + '/available/' + package.repository + '/cache/'
                      + package.name + '/' + package.version + '/'
                      + package.architecture + '/package.tar.gz')
    ]
    # Group the remaining packages by their repository of origin.
    # setdefault replaces the original try/except-KeyError append dance.
    grouped_packages = {}
    for package in packages:
        grouped_packages.setdefault(package.repository, []).append(package)
    for repository_name in grouped_packages:
        try:
            repository = repositories[repository_name]
        except KeyError:
            raise RepositoryError(repository_name)
        # Best-effort: merge the repository's environment variables, if any.
        try:
            environment.merge(repository['env'])
        except environment.EnvironmentError:
            message.warning(
                "could not merge the environment variables associated to the repository '{0}'!"
                .format(repository_name))
        except KeyError:
            pass
        for package in grouped_packages[repository_name]:
            if package.has_checksum():
                n = package.name
                v = package.version
                a = package.architecture
                # Create the whole cache path, one level at a time;
                # existing directories are fine.
                directories = [
                    db + '/available',
                    db + '/available/' + repository_name,
                    db + '/available/' + repository_name + '/cache',
                    db + '/available/' + repository_name + '/cache/' + n,
                    db + '/available/' + repository_name + '/cache/' + n + '/' + v,
                    db + '/available/' + repository_name + '/cache/' + n + '/' + v + '/' + a
                ]
                for directory in directories:
                    try:
                        mkdir(directory)
                    except OSError:
                        pass
                try:
                    chdir(db + '/available/' + package.repository + '/cache/'
                          + n + '/' + v + '/' + a)
                except OSError:
                    raise DownloadError(package)
                if not isfile('package.tar.gz'):
                    handler = repository['handler']
                    target = "{0}/{1}/{2}/{3}/package.tar.gz".format(
                        repository['target'], n, v, a)
                    if system(handler + ' ' + target) != 0:
                        raise DownloadError(package)
        # Best-effort: purge the previously merged environment variables.
        # (The original referenced the leaked inner-loop variable
        # package.repository here, which equals repository_name.)
        try:
            environment.purge(repository['env'].keys())
        except environment.EnvironmentError:
            message.warning(
                "could not purge the environment variables associated to the repository '{0}'!"
                .format(repository_name))
        except KeyError:
            pass
    return True
def __exit__(self, exc_type, exc_value, traceback):
    """Context-manager exit: close the cursor, downgrading failures to a warning."""
    try:
        self.close()
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; `Exception` keeps the best-effort close
        # while letting process-control exceptions propagate.
        message.warning('cursor close raised exception %(exc)s',
                        exc=sys.exc_info()[1])
def ignoring(arg):
    # Report that *arg* is being ignored (warning-level message only).
    message.warning('Ignoring %(arg)s', arg=arg)
def _install(configuration, installed, package, filepath):
    """
    Performs a low-level package installation.

    Parameters
        configuration
            a valid Craft Configuration object.
        installed
            Set having all currently installed units on the system.
        package
            the Package unit to be installed.
        filepath
            absolute filesystem path of the package's archive to be installed.
    Raises
        InstallError
            if any error occurs during the installation.
        OSError
            if, in case an operation has failed, it is not possible to
            cleanly recover from it.
    Returns
        True if the installation was successfully completed.
    """
    architecture = package.architecture
    name = package.name
    version = package.version
    db = configuration.db()
    package_directory = db+'installed/'+name+'/'+version+'/'+architecture

    def _abort_and_cleanup(reason):
        # Warn, remove the partially-created package directory, then fail.
        # An OSError from rmtree propagates, matching the four inline
        # copies of this sequence in the original.
        message.warning(reason)
        try:
            rmtree(package_directory)
        except OSError:
            raise
        raise InstallError(package)

    # Best-effort creation of the bookkeeping hierarchy.
    craft_directories = [
        db+'installed/',
        db+'installed/'+name,
        db+'installed/'+name+'/'+version
    ]
    for each in craft_directories:
        try:
            mkdir(each)
        except OSError:
            pass
    if package in installed:
        message.warning("'{0}' is already installed. Aborting...".format(package))
        raise InstallError(package)
    try:
        mkdir(package_directory)
    except OSError:
        message.warning("failed to create internal directory while installing '{0}'. Aborting...".format(package))
        raise InstallError(package)
    try:
        chdir(package_directory)
    except OSError:
        message.warning("could not access the directory belonging to package '{0}'. Aborting...".format(package))
        raise InstallError(package)
    # Verify the archive against its recorded sha1, if the package has one.
    sha1 = package.has_checksum('sha1')
    if sha1:
        if not filepath:
            message.warning("missing archive filepath for package '{0}'. Aborting...".format(package))
            raise InstallError(package)
        if not checksum.sha1(filepath, sha1):
            _abort_and_cleanup("inconsistent archive provided for package '{0}'. Aborting...".format(package))
    package_files = archive.getfiles(filepath)
    if not package_files:
        _abort_and_cleanup("empty archive provided for package '{0}'. Aborting...".format(package))
    # Persist the archive's file list for later uninstallation.
    try:
        package_files_dump_handle = open('files', 'w')
    except IOError:
        _abort_and_cleanup("could not write the file list for package '{0}'. Aborting...".format(package))
    else:
        for each in package_files:
            package_files_dump_handle.write(each+'\n')
        package_files_dump_handle.close()
    if not archive.extract(filepath, configuration.root()):
        _abort_and_cleanup("could not extract the archive provided for package '{0}'. Aborting...".format(package))
    try:
        if not dump.package(package, 'metadata.yml'):
            message.warning("failed to write metadata.yml for package '{0}'. Aborting...".format(package))
            raise InstallError(package)
    except IOError:
        raise
    installed.add(package)
    return True
def _uninstall(configuration, installed, package, keep_static):
    """
    Performs a low-level package uninstallation.

    Parameters
        configuration
            a valid Craft Configuration object.
        installed
            Set having all currently installed units on the system.
        package
            the Package unit to be uninstalled.
        keep_static
            specifies whether the package's static files must be preserved
            or not.
    Raises
        UninstallError
            if any error occurs during the uninstallation.
    Returns
        True if the uninstallation was successfully completed.
    """
    architecture = package.architecture
    name = package.name
    version = package.version
    db = configuration.db()
    root = configuration.root()
    if package not in installed:
        message.warning("'{0}' is not installed. Aborting...".format(package))
        raise UninstallError(package)
    # Work from inside the package's bookkeeping directory.
    try:
        chdir(db+'installed/'+name+'/'+version+'/'+architecture)
    except OSError:
        message.warning("could not access the directory belonging to package '{0}'.".format(package))
        raise UninstallError(package)
    # Recover the file list written at install time; a missing list is
    # tolerated and treated as empty.
    package_files = []
    try:
        handle = open('files')
    except IOError:
        pass
    else:
        package_files = handle.read().splitlines()
        handle.close()
    craft_files = [
        db+'installed/'+name+'/'+version+'/'+architecture+'/metadata.yml',
        db+'installed/'+name+'/'+version+'/'+architecture+'/files',
        db+'installed/'+name+'/'+version+'/'+architecture
    ]
    # Pre-check write access to everything before removing anything, so a
    # permission problem aborts the whole operation up front.
    for each in package_files:
        if not access(root+each, W_OK):
            message.warning("cannot remove file '{0}' from package '{1}'.".format(root+each, package))
            raise UninstallError(package)
    for each in craft_files:
        if isfile(each) or isdir(each):
            if not access(each, W_OK):
                message.warning("cannot remove file '{0}'.".format(each))
                raise UninstallError(package)
    # Optionally preserve the package's static files as '<name>.craft-old'.
    if keep_static:
        for each in package.static():
            try:
                message.simple("Attempting to save '{0}' as '{1}'...".format(root+each, root+each+'.craft-old'))
                rename(root+each, root+each+'.craft-old')
            except OSError:
                message.simple("Could not preserve the following static file: '{0}'.".format(root+each))
                message.simple(" '{0}' may exist already.".format(root+each+'.craft-old'))
                pass
    # Best-effort removal of the package's files, then its bookkeeping files.
    for each in package_files:
        try:
            if isdir(root+each):
                rmdir(root+each)
            elif isfile(root+each):
                remove(root+each)
        except OSError:
            pass
    for each in craft_files:
        try:
            if isdir(each):
                rmdir(each)
            elif isfile(each):
                remove(each)
        except OSError:
            pass
    # Remove now-empty parent directories, innermost first; `break` (not
    # `continue`) is deliberate: if the version directory cannot be removed,
    # its parent cannot be empty either.
    try_to_remove = [
        db+'installed/'+name+'/'+version,
        db+'installed/'+name
    ]
    for each in try_to_remove:
        try:
            rmdir(each)
        except OSError:
            break
    installed.remove(package)
    return True
def __init__(self, log_ids=None, test_ids=None, xml=None, threshold=0, robust=False, previous=None):
    'log_ids is a list of regression roots'
    # Mutable-default-argument fix: defaults were `[]`, shared across calls.
    # `None` sentinels are backward compatible (callers passing lists, or
    # nothing, see identical behaviour).
    log_ids = log_ids or []
    test_ids = test_ids or []
    self.log_ids = log_ids
    s_log_ids = ','.join(map(str, log_ids))
    self.tests = mdb.connection().row_cursor()
    if log_ids:
        # create table of individual runs, but not root node as this may have already summarised coverage
        self.tests.execute(
            'CREATE TEMPORARY TABLE ' + self.invs + ' AS SELECT l1.*, goal_id AS master FROM log AS l0 JOIN log AS l1 ON (l0.log_id = l1.root) LEFT OUTER JOIN master ON (l1.log_id = master.log_id) WHERE l1.root IN (' + s_log_ids + ');')
        self.tests.execute('SELECT count(*) AS children FROM ' + self.invs)
        children = self.tests.fetchone().children
        if children:
            message.information(
                '%(log_ids)s %(has)s %(children)d children',
                log_ids=s_log_ids, children=children,
                has='have' if len(log_ids) > 1 else 'has')
    # append individual runs as given by test_ids
    if xml:
        xml_ids = xml.xml.xpath('/optimize/test/log_id/text()')
    else:
        xml_ids = []
    if test_ids or xml_ids:
        s_test_ids = ','.join(map(str, test_ids + xml_ids))
        create = ('INSERT INTO ' + self.invs) if log_ids else ('CREATE TEMPORARY TABLE ' + self.invs + ' AS')
        self.tests.execute(
            create + ' SELECT log.*, IFNULL(goal_id, goal.log_id) AS master FROM log LEFT OUTER JOIN master ON (log.log_id = master.log_id) LEFT OUTER JOIN goal ON (log.log_id = goal.log_id) WHERE log.log_id IN (' + s_test_ids + ') GROUP BY log_id;')
    self.tests.execute('SELECT count(*) AS tests FROM ' + self.invs)
    tests = self.tests.fetchone().tests
    if tests < 1:
        message.fatal('no tests')
    message.information('starting with %(count)d tests in table %(table)s',
                        count=tests, table=self.invs)
    # check congruency
    self.cvg = mdb.connection().row_cursor()
    rows = self.cvg.execute(
        "SELECT md5_self AS md5, 'md5_self' AS type, invs.master, invs.root FROM point JOIN " + self.invs + " AS invs ON (invs.master = point.log_id AND point.parent IS NULL) GROUP BY md5;"
    )
    md5 = self.cvg.fetchall()
    if not md5:
        message.fatal('no master')
    elif len(md5) > 1:
        message.fatal('md5 of multiple masters do not match')
    else:
        message.debug('md5 query returns %(rows)d', rows=rows)
    self.master = mdb.accessor(md5=md5[0])
    self.cvg.execute(
        "SELECT DISTINCT(md5_axes) AS md5, 'md5_axes' AS type, invs.master, invs.root FROM point JOIN " + self.invs + " AS invs ON (invs.master = point.log_id AND point.parent IS NULL) GROUP BY md5;"
    )
    md5 = self.cvg.fetchall()
    if len(md5) > 1:
        message.fatal('md5 of multiple axis masters do not match')
    self.master.axes = md5[0]
    # create status table, collating goal & hits
    self.cvg.execute(
        'CREATE TEMPORARY TABLE ' + self.covg + ' (bucket_id INTEGER NOT NULL PRIMARY KEY, goal INTEGER, hits INTEGER, total_hits INTEGER, rhits INTEGER, max_hits INTEGER, tests INTEGER);'
    )
    # Was a bare `except:`; float() can only raise TypeError or ValueError
    # here, so catch exactly those and keep the diagnostic warning.
    try:
        self.threshold = float(threshold)
    except (TypeError, ValueError):
        self.threshold = 0.0
        message.warning(
            'cannot convert threshold value given "%(arg)s" to float because %(exception)s, using %(threshold)2.1f',
            arg=threshold, exception=sys.exc_info()[0], threshold=self.threshold)
    self.robust = robust
    self.previous = previous
def enqueue(self, cmd):
    'just execute here'
    # Run the command synchronously, passing the mdb root/parent ids through
    # the environment so the child can attach its logs to this regression.
    message.debug('enqueue %(cmd)s', cmd=cmd)
    result = subprocess.Popen(
        cmd.split(' '),
        env=dict(os.environ,
                 MDB='root='+str(mdb_conn.get_root())+',parent='+str(mdb_conn.log_id),
                 PYTHONPATH=os.environ['PYTHONPATH']+':../python')).wait()
    # Was `result > 0`: Popen.wait() returns a NEGATIVE value when the child
    # is killed by a signal on POSIX, so those failures were silently missed.
    if result != 0:
        message.warning('process %(cmd)s returned non zero %(result)d',
                        cmd=cmd, result=result)