def __init__(self, path, size_min=4800, size_max=5200):
    """Open the database rooted at the given folder.

    path     -- folder that contains the 'database/' data sub-folder
    size_min -- lower bound of the handler LRU cache
    size_max -- upper bound of the handler LRU cache

    Raises ValueError if `path` or its 'database/' sub-folder is not a
    folder.
    """
    # FIX: the original used the deprecated Python 2 statement form
    # `raise ValueError, error`; the call form below is equivalent in
    # Python 2 and forward-compatible with Python 3.
    # 1. Keep the path
    if not lfs.is_folder(path):
        raise ValueError('"%s" should be a folder, but it is not' % path)
    folder = lfs.open(path)
    self.path = str(folder.path)

    # 2. Keep the path to the data
    self.path_data = '%s/database/' % self.path
    if not lfs.is_folder(self.path_data):
        raise ValueError(
            '"%s" should be a folder, but it is not' % self.path_data)

    # 3. Initialize the database, but chrooted
    self.fs = lfs.open(self.path_data)

    # 4. New interface to Git
    self.worktree = open_worktree(self.path_data)

    # 5. A mapping from key to handler
    self.cache = LRUCache(size_min, size_max, automatic=False)

    # 6. The git cache
    self.git_cache = LRUCache(900, 1100)
def make_version(cwd='.'):
    """This function finds out the version number from the source, this
    will be written to the 'version.txt' file, which will be read once the
    software is installed to get the version number.
    """
    tree = open_worktree(cwd)

    # Without an active branch there is nothing to version
    active_branch = tree.get_branch_name()
    if active_branch is None:
        return None

    # Derive the version name from the latest tag, when there is one
    described = tree.git_describe()
    if described:
        tag, distance, commit = described
        if tag.startswith(active_branch):
            version = tag
        else:
            version = '%s-%s' % (active_branch, tag)
        # The tag points exactly at HEAD: use it verbatim
        if distance == 0:
            return version
    else:
        version = active_branch

    # Otherwise append the committer timestamp of HEAD
    stamp = tree.get_metadata()['committer_date'].strftime('%Y%m%d%H%M')
    return '%s-%s' % (version, stamp)
def make_git_database(path, size_min, size_max, fields=None):
    """Create a new empty Git database if the given path does not exists or
    is a folder. If the given path is a folder with content, the Git archive
    will be initialized and the content of the folder will be added to it
    in a first commit.
    """
    root = lfs.get_absolute_path(path)

    # Initialize the Git archive
    open_worktree('%s/database' % root, init=True)

    # Build the catalog, over the registered fields by default
    if fields is None:
        fields = get_register_fields()
    catalog = make_catalog('%s/catalog' % root, fields)

    # Wire the catalog into a read-write database
    database = RWDatabase(root, size_min, size_max)
    database.catalog = catalog
    return database
def get_manifest():
    """Return the list of source files: from Git when a worktree is
    available, otherwise by filtering the filesystem tree.
    """
    tree = open_worktree('.', soft=True)
    if tree:
        # Git already knows the tracked files; drop the ignore file itself
        skip = frozenset(['.gitignore'])
        names = []
        for name in tree.get_filenames():
            if name not in skip:
                names.append(name)
        return names

    # No git: find out source files
    languages = get_config().get_value('target_languages')
    pattern = compile('.*(~|pyc|%s)$' % '|'.join(languages))
    skip = frozenset(['.git', 'build', 'dist'])
    return get_files(skip, filter=lambda x: not pattern.match(x))
def create_graph():
    """Create graph of code quality evolution.

    Clones the current git project into a temporary directory, walks its
    history oldest-first collecting per-commit quality statistics, then
    renders one PNG graph per problem family into the original directory.
    """
    t0 = time()
    cwd = getcwd()
    # Project name is the last path component of the working directory
    project_name = cwd.split('/')[-1]
    # We copy project to tmp (for security)
    tmp_directory = '%s/ipkg_quality.git' % mkdtemp()
    call(['git', 'clone', cwd, tmp_directory])
    chdir(tmp_directory)
    worktree = open_worktree(tmp_directory)
    # First step: we create a list of statistics
    statistics = {}
    # Oldest commit first, so statistics accumulate chronologically
    commits = worktree.git_log(reverse=True)
    print 'Script will analyse %s commits.' % len(commits)
    for commit in commits:
        # We move to a given commit
        call(['git', 'reset', '--hard', commit['sha']])
        # Print script evolution
        stdout.write('.')
        stdout.flush()
        # We list files
        filenames = worktree.get_filenames()
        filenames = [ x for x in filenames if x.endswith('.py') ]
        # We get code quality for this files
        stats, files_db = analyse(filenames, ignore_errors=True)
        # Bucket statistics by calendar day of the committer date
        metadata = worktree.get_metadata()
        date_time = metadata['committer_date']
        commit_date = date(date_time.year, date_time.month, date_time.day)
        if commit_date not in statistics:
            statistics[commit_date] = stats
        else:
            # Same day => Avg
            # NOTE(review): integer division here (Python 2 `/` on ints)
            # truncates the running average — confirm this is intended
            for key in statistics[commit_date]:
                avg = (statistics[commit_date][key] + stats[key])/2
                statistics[commit_date][key] = avg
    print
    # Get dates
    values = []
    dates = statistics.keys()
    dates.sort()
    for a_date in dates:
        values.append(statistics[a_date])
    # Base graph informations
    base_title = '[%s %s]' % (project_name, worktree.get_branch_name())
    # We generate graphs
    chdir(cwd)
    for problem_dict in ['code_length', 'aesthetics_problems',
                         'exception_problems', 'import_problems']:
        # NOTE(review): eval() resolves module-level dicts named above;
        # presumably defined elsewhere in this module — globals()[...]
        # would be a safer lookup, confirm before changing
        current_problems = eval(problem_dict)
        graph_title = '%s %s' % (base_title, current_problems['title'])
        lines = []
        labels = []
        fig = Figure()
        graph = fig.add_subplot(111)
        for key in current_problems['keys']:
            # 'pourcent' graphs are normalized against total line count
            if current_problems['pourcent']:
                problem_values = [((x[key]*100.0)/x['lines']) for x in values]
            else:
                problem_values = [x[key] for x in values]
            lines.append(graph.plot_date(dates, problem_values, '-'))
            labels.append(current_problems['keys'][key])
        graph.set_title(graph_title)
        graph.xaxis.set_major_formatter(DateFormatter("%b '%y'"))
        if current_problems['pourcent']:
            graph.set_ylabel('Pourcent')
            graph.yaxis.set_major_formatter(FormatStrFormatter("%5.02f %%"))
        else:
            graph.set_ylabel('Quantity')
        graph.set_xlabel('')
        graph.autoscale_view()
        graph.grid(True)
        fig.autofmt_xdate()
        # Extra height leaves room for the legend placed below (loc=8)
        fig.set_figheight(fig.get_figheight()+5)
        legend = fig.legend(lines, labels, loc=8, axespad=0.0)
        legend.get_frame().set_linewidth(0)
        canvas = FigureCanvasAgg(fig)
        destination = 'graph_%s.png' % problem_dict
        canvas.print_figure(destination, dpi=80)
        print '%s -> %s ' % (graph_title, destination)
    t1 = time()
    print 'Generation time: %d minutes.' % ((t1 - t0)/60)
                      # (continuation of an add_option call started above
                      # this chunk)
                      help='number of worse files showed, 0 for all')
    # Show lines
    parser.add_option('-s', '--show-lines', action='store_true',
                      dest='show_lines', default=False,
                      help='give the line of each problem found')
    # Show graph
    parser.add_option('-g', '--graph', action="store_true", dest='graph',
                      default=False,
                      help='create graphs of code quality evolution.')
    options, args = parser.parse_args()
    # Filenames: explicit globs win, then git-tracked files, then a
    # filesystem walk as the last resort
    worktree = open_worktree('.', soft=True)
    if args:
        # Union of all glob expansions, de-duplicated via a set
        filenames = set([])
        for arg in args:
            filenames = filenames.union(glob(arg))
        filenames = list(filenames)
    elif worktree:
        # Only analyse Python sources tracked by git
        filenames = worktree.get_filenames()
        filenames = [ x for x in filenames if x.endswith('.py') ]
    else:
        # No git: traverse the filesystem for .py files
        filenames = []
        for path in lfs.traverse():
            if lfs.is_file(path) and basename(path).endswith('.py'):
                filenames.append(relpath(path))
    # Check options
def build():
    """Build the package: compile message catalogs, translate the XHTML
    templates into the target languages, and write the MANIFEST file
    listing everything to install.
    """
    worktree = open_worktree('.', soft=True)
    # Try using git facilities
    if not worktree:
        print "Warning: not using git."
    # Read configuration for languages
    config = get_config()
    source_language = config.get_value('source_language', default='en')
    target_languages = config.get_value('target_languages')
    # (1) Initialize the manifest file
    # Symlinks are excluded from the manifest
    manifest = [ x for x in get_manifest() if not islink(x) ]
    manifest.append('MANIFEST')
    # Find out the version string
    if worktree:
        version = make_version()
        # NOTE(review): file handle not explicitly closed (CPython refcount
        # closes it); same pattern recurs below — consider with-blocks
        open('version.txt', 'w').write(version)
        print '* Version:', version
        manifest.append('version.txt')
    # (2) Internationalization
    bad_templates = []
    if lfs.exists('locale'):
        # Build MO files
        print '* Compile message catalogs:',
        stdout.flush()
        for lang in (source_language,) + target_languages:
            print lang,
            stdout.flush()
            call([ 'msgfmt', 'locale/%s.po' % lang,
                   '-o', 'locale/%s.mo' % lang])
            # Add to the manifest
            manifest.append('locale/%s.mo' % lang)
        print
        # Load message catalogs, keeping each PO's mtime for the
        # up-to-date check below
        message_catalogs = {}
        for lang in target_languages:
            path = 'locale/%s.po' % lang
            handler = ro_database.get_handler(path)
            message_catalogs[lang] = (handler, lfs.get_mtime(path))
        # Build the templates in the target languages
        # Matches e.g. 'foo.xhtml.en' when source_language is 'en'
        good_files = compile('.*\\.x.*ml.%s$' % source_language)
        exclude = frozenset(['.git', 'build', 'docs', 'dist'])
        lines = get_files(exclude, filter=lambda x: good_files.match(x))
        lines = list(lines)
        if lines:
            print '* Build XHTML files',
            stdout.flush()
            for path in lines:
                # Load the handler
                src_mtime = lfs.get_mtime(path)
                src = ro_database.get_handler(path, XHTMLFile)
                done = False
                # Build the translation
                # Replace the trailing language suffix with each target
                n = path.rfind('.')
                error = False
                for language in target_languages:
                    po, po_mtime = message_catalogs[language]
                    dst = '%s.%s' % (path[:n], language)
                    # Add to the manifest
                    manifest.append(dst)
                    # Skip the file if it is already up-to-date
                    if lfs.exists(dst):
                        dst_mtime = lfs.get_mtime(dst)
                        if dst_mtime > src_mtime and dst_mtime > po_mtime:
                            continue
                    try:
                        data = src.translate(po)
                    except StandardError:
                        # Remember the failure; report all of them at the end
                        error = True
                        bad_templates.append((path, exc_info()))
                    else:
                        open(dst, 'w').write(data)
                        done = True
                # Done: one progress char per source template
                # ('E' error, '*' rebuilt, '.' up-to-date)
                if error is True:
                    stdout.write('E')
                elif done is True:
                    stdout.write('*')
                else:
                    stdout.write('.')
                stdout.flush()
            print
    # (3) Build the manifest file
    manifest.sort()
    lines = [ x + '\n' for x in manifest ]
    open('MANIFEST', 'w').write(''.join(lines))
    print '* Build MANIFEST file (list of files to install)'
    # (4) Show errors
    if bad_templates:
        print
        print '***********************************************************'
        print 'The following templates could not be translated'
        print '***********************************************************'
        for (path, (type, value, traceback)) in bad_templates:
            print
            print path
            print_exception(type, value, traceback)