def handle_noargs(self, **kwargs):
     try:
         verbose = int(kwargs['verbosity']) > 0
     except (KeyError, TypeError, ValueError):
         verbose = True
     
     for release in DocumentRelease.objects.all():
         if verbose:
             print "Updating %s..." % release
             
         destdir = Path(settings.DOCS_BUILD_ROOT).child(release.lang, release.version)
         if not destdir.exists():
             destdir.mkdir(parents=True)
         
         # Make an SCM checkout/update into the destination directory.
         # Do this dynamically in case we add other SCM later.
         getattr(self, 'update_%s' % release.scm)(release.scm_url, destdir)
         
         # Make the directory for the JSON files - sphinx-build doesn't
         # do it for us, apparently.
         json_build_dir = destdir.child('_build', 'json')
         if not json_build_dir.exists():
             json_build_dir.mkdir(parents=True)
         
         # "Shell out" (not exactly, but basically) to sphinx-build.
         sphinx.cmdline.main(['sphinx-build',
             '-b', 'json',      # Use the JSON builder
             '-q',              # Be vewy qwiet
             destdir,           # Source file directory
             json_build_dir,    # Destination directory
         ])
Ejemplo n.º 2
0
def run_benchmarks(control, benchmark_dir, benchmarks, trials, record_dir=None, profile_dir=None):
    """Run each discovered benchmark under two interpreters and report.

    control        -- path to the Django tree used for the control run.
    benchmark_dir  -- directory scanned by discover_benchmarks().
    benchmarks     -- iterable of benchmark names to run; falsy means all.
    trials         -- number of trials per benchmark.
    record_dir     -- optional existing directory for per-benchmark JSON.
    profile_dir    -- optional directory for profiler output files.

    Raises ValueError if record_dir is given but does not exist.

    NOTE(review): `cpython` and `pypy` are not defined in this function --
    presumably module-level paths to the two interpreters; confirm.
    """
    if benchmarks:
        print "Running benchmarks: %s" % " ".join(benchmarks)
    else:
        print "Running all benchmarks"

    if record_dir:
        record_dir = Path(record_dir).expand().absolute()
        if not record_dir.exists():
            raise ValueError('Recording directory "%s" does not exist' % record_dir)
        print "Recording data to '%s'" % record_dir

    # vcs is hard-coded to None, so branch_info below is always empty.
    control_label = get_django_version(control, vcs=None)
    branch_info =  ""
    print "Benchmarking: Django %s (in %s%s)" % (control_label, branch_info, control)
    print "    Control: %s" % cpython
    print "    Experiment: %s" % pypy
    print

    # Subshell environment for the benchmark processes; the experiment env
    # is derived from this (copied per benchmark below).
    control_env = {'PYTHONPATH': '.:%s:%s' % (Path(control).absolute(), Path(benchmark_dir))}

    for benchmark in discover_benchmarks(benchmark_dir):
        if not benchmarks or benchmark.name in benchmarks:
            print "Running '%s' benchmark ..." % benchmark.name
            settings_mod = '%s.settings' % benchmark.name
            control_env['DJANGO_SETTINGS_MODULE'] = settings_mod
            # Copy AFTER setting the settings module so both envs share it.
            experiment_env = control_env.copy()
            if profile_dir is not None:
                control_env['DJANGOBENCH_PROFILE_FILE'] = Path(profile_dir, "cpython-%s" % benchmark.name)
                experiment_env['DJANGOBENCH_PROFILE_FILE'] = Path(profile_dir, "pypy-%s" % benchmark.name)
            try:
                control_data = run_benchmark(benchmark, trials, control_env, cpython)
                experiment_data = run_benchmark(benchmark, trials, experiment_env, pypy)
            except SkipBenchmark, reason:  # Python 2 except syntax
                print "Skipped: %s\n" % reason
                continue

            options = argparse.Namespace(
                track_memory = False,
                diff_instrumentation = False,
                benchmark_name = benchmark.name,
                disable_timelines = True,
            )
            result = perf.CompareBenchmarkData(control_data, experiment_data, options)
            if record_dir:
                record_benchmark_results(
                    dest = record_dir.child('%s.json' % benchmark.name),
                    name = benchmark.name,
                    result = result,
                    control = 'cpython',
                    experiment = 'pypy',
                    control_data = control_data,
                    experiment_data = experiment_data,
                )
            print format_benchmark_result(result, len(control_data.runtimes))
            print
Ejemplo n.º 3
0
def dict2dir(dir, dic, mode="w"):
    """Create directory *dir* and populate it from the nested dict *dic*.

    Keys are filenames; string values become file contents (opened with
    *mode*), and dict values become subdirectories filled recursively.

    dir  -- directory path (anything FSPath accepts); created if missing.
    dic  -- mapping of filename -> str content or nested dict.
    mode -- file open mode for every file written (e.g. "w", "a", "wb").
    """
    dir = FSPath(dir)
    if not dir.exists():
        dir.mkdir()
    # .items() instead of .iteritems(): identical behavior on Python 2,
    # and also valid on Python 3.
    for filename, content in dic.items():
        p = FSPath(dir, filename)
        if isinstance(content, dict):
            # Bug fix: propagate *mode* -- previously the recursion dropped
            # it, so nested files were always written with the default "w".
            dict2dir(p, content, mode)
            continue
        # Context manager guarantees the file is closed even if write fails.
        with open(p, mode) as f:
            f.write(content)
Ejemplo n.º 4
0
def require_dir(config, key, create_if_missing=False):
    """Validate that config[key] names an existing directory.

    config            -- mapping of configuration options.
    key               -- option name whose value is a directory path.
    create_if_missing -- when true, create the directory (and parents)
                         if it does not exist yet.

    Raises KeyError if *key* is absent, and OSError if the path is
    missing (and not created) or is not a directory.
    """
    from unipath import FSPath as Path
    try:
        dir = config[key]
    except KeyError:
        msg = "config option '%s' missing"
        raise KeyError(msg % key)
    dir = Path(config[key])
    # Bug fix: honour create_if_missing -- previously the directory was
    # created unconditionally, so the flag was dead and a misconfigured
    # path was silently materialized instead of being reported below.
    if create_if_missing and not dir.exists():
        dir.mkdir(parents=True)
    if not dir.isdir():
        msg = ("directory '%s' is missing or not a directory "
               "(from config option '%s')")
        tup = dir, key
        raise OSError(msg % tup)
Ejemplo n.º 5
0
    def handle_noargs(self, **kwargs):
        """Check out, build, zip, publish, and optionally reindex the docs
        for every DocumentRelease.

        Reads 'verbosity' (int-ish, default 1) and 'reindex' (bool) from
        **kwargs. Builds JSON and HTML (plus gettext for default releases)
        with sphinx-build, zips the HTML into MEDIA_ROOT, rsyncs the build
        into the serving directory, then refreshes Document rows and the
        haystack search index.
        """
        try:
            verbosity = int(kwargs['verbosity'])
        except (KeyError, TypeError, ValueError):
            verbosity = 1

        builders = ['json', 'html']

        # Somehow, bizarely, there's a bug in Sphinx such that if I try to
        # build 1.0 before other versions, things fail in weird ways. However,
        # building newer versions first works. I suspect Sphinx is hanging onto
        # some global state. Anyway, we can work around it by making sure that
        # "dev" builds before "1.0". This is ugly, but oh well.
        for release in DocumentRelease.objects.order_by('-version'):
            if verbosity >= 1:
                print "Updating %s..." % release

            # checkout_dir is shared for all languages.
            checkout_dir = Path(settings.DOCS_BUILD_ROOT).child(release.version)
            parent_build_dir = Path(settings.DOCS_BUILD_ROOT).child(release.lang, release.version)
            if not checkout_dir.exists():
                checkout_dir.mkdir(parents=True)
            if not parent_build_dir.exists():
                parent_build_dir.mkdir(parents=True)

            #
            # Update the release from SCM.
            #

            # Make an SCM checkout/update into the destination directory.
            # Do this dynamically in case we add other SCM later.
            getattr(self, 'update_%s' % release.scm)(release.scm_url, checkout_dir)

            if release.docs_subdir:
                source_dir = checkout_dir.child(*release.docs_subdir.split('/'))
            else:
                source_dir = checkout_dir

            if release.lang != 'en':
                # NOTE(review): checkout dir is 'django-docs-translation'
                # (singular) while the repo is 'django-docs-translations.git' --
                # confirm the mismatch is intentional.
                scm_url = release.scm_url.replace('django.git', 'django-docs-translations.git')
                trans_dir = checkout_dir.child('django-docs-translation')
                if not trans_dir.exists():
                    trans_dir.mkdir()
                getattr(self, 'update_%s' % release.scm)(scm_url, trans_dir)
                if not source_dir.child('locale').exists():
                    # write_link presumably creates a symlink to the checked-out
                    # translations -- TODO confirm unipath semantics.
                    source_dir.child('locale').write_link(trans_dir.child('translations'))
                # HACK: shell string with an unquoted path; safe only because
                # trans_dir comes from trusted settings.
                subprocess.call("cd %s && make translations" % trans_dir, shell=True)

            if release.is_default:
                # Build the pot files (later retrieved by Transifex)
                # NOTE(review): 'builders' is defined outside the loop, so once
                # a default release is seen every LATER release also builds
                # 'gettext' -- confirm this accumulation is intended.
                builders.append('gettext')

            #
            # Use Sphinx to build the release docs into JSON and HTML documents.
            #
            for builder in builders:
                # Wipe and re-create the build directory. See #18930.
                build_dir = parent_build_dir.child('_build', builder)
                if build_dir.exists():
                    shutil.rmtree(build_dir)
                build_dir.mkdir(parents=True)

                if verbosity >= 2:
                    print "  building %s (%s -> %s)" % (builder, source_dir, build_dir)
                subprocess.call(['sphinx-build',
                    '-b', builder,
                    '-D', 'language=%s' % release.lang,
                    '-q',              # Be vewy qwiet
                    source_dir,        # Source file directory
                    build_dir,         # Destination directory
                ])

            #
            # Create a zip file of the HTML build for offline reading.
            # This gets moved into MEDIA_ROOT for downloading.
            #
            html_build_dir = parent_build_dir.child('_build', 'html')
            zipfile_name = 'django-docs-%s-%s.zip' % (release.version, release.lang)
            zipfile_path = Path(settings.MEDIA_ROOT).child('docs', zipfile_name)
            if not zipfile_path.parent.exists():
                zipfile_path.parent.mkdir(parents=True)
            if verbosity >= 2:
                print "  build zip (into %s)" % zipfile_path

            # Include regular files only, and skip Sphinx's .doctrees cache.
            def zipfile_inclusion_filter(f):
                return f.isfile() and '.doctrees' not in f.components()

            with closing(zipfile.ZipFile(zipfile_path, 'w')) as zf:
                for f in html_build_dir.walk(filter=zipfile_inclusion_filter):
                    zf.write(f, html_build_dir.rel_path_to(f))

            #
            # Copy the build results to the directory used for serving
            # the documentation in the least disruptive way possible.
            #
            # --link-dest hardlinks unchanged files against the fresh build,
            # so the serving tree is updated with minimal churn.
            build_dir = parent_build_dir.child('_build')
            built_dir = parent_build_dir.child('_built')
            subprocess.check_call(['rsync', '--archive', '--delete',
                    '--link-dest=' + build_dir, build_dir + '/', built_dir])

            #
            # Rebuild the imported document list and search index.
            #
            if not kwargs['reindex']:
                continue

            if verbosity >= 2:
                print "  reindexing..."

            # Build a dict of {path_fragment: document_object}. We'll pop values
            # out of this dict as we go which'll make sure we know which
            # remaining documents need to be deleted (and unindexed) later on.
            documents = dict((doc.path, doc) for doc in release.documents.all())

            # Walk the tree we've just built looking for ".fjson" documents
            # (just JSON, but Sphinx names them weirdly). Each one of those
            # documents gets a corresponding Document object created which
            # we'll then ask Sphinx to reindex.
            #
            # We have to be a bit careful to reverse-engineer the correct
            # relative path component, especially for "index" documents,
            # otherwise the search results will be incorrect.
            json_built_dir = parent_build_dir.child('_built', 'json')
            for built_doc in json_built_dir.walk():
                if built_doc.isfile() and built_doc.ext == '.fjson':

                    # Convert the built_doc path which is now an absolute
                    # path (i.e. "/home/docs/en/1.2/_built/ref/models.json")
                    # into a path component (i.e. "ref/models").
                    path = json_built_dir.rel_path_to(built_doc)
                    if path.stem == 'index':
                        path = path.parent
                    path = str(path.parent.child(path.stem))

                    # Read out the content and create a new Document object for
                    # it. We'll strip the HTML tags here (for want of a better
                    # place to do it).
                    with open(built_doc) as fp:
                        json_doc = json.load(fp)
                        try:
                            json_doc['body']  # Just to make sure it exists.
                            title = unescape_entities(strip_tags(json_doc['title']))
                        except KeyError, ex:  # Python 2 except syntax
                            if verbosity >= 2:
                                print "Skipping: %s (no %s)" % (path, ex.args[0])
                            continue

                    # Reuse the existing Document row if there is one,
                    # otherwise create a fresh (unsaved) one.
                    doc = documents.pop(path, Document(path=path, release=release))
                    doc.title = title
                    doc.save()
                    haystack.site.update_object(doc)

            # Clean up any remaining documents.
            for doc in documents.values():
                if verbosity >= 2:
                    print "Deleting:", doc
                haystack.site.remove_object(doc)
                doc.delete()
Ejemplo n.º 6
0
    def handle_noargs(self, **kwargs):
        """Check out, build, zip, publish, and optionally reindex the docs
        for every DocumentRelease.

        Reads 'verbosity' (int-ish, default 1) and 'reindex' (bool) from
        **kwargs. Builds JSON and HTML in-process via sphinx.cmdline.main,
        zips the HTML into MEDIA_ROOT, rsyncs the build into the serving
        directory, then refreshes Document rows and the haystack index.
        """
        try:
            verbosity = int(kwargs['verbosity'])
        except (KeyError, TypeError, ValueError):
            verbosity = 1

        # Somehow, bizarely, there's a bug in Sphinx such that if I try to
        # build 1.0 before other versions, things fail in weird ways. However,
        # building newer versions first works. I suspect Sphinx is hanging onto
        # some global state. Anyway, we can work around it by making sure that
        # "dev" builds before "1.0". This is ugly, but oh well.
        for release in DocumentRelease.objects.order_by('-version'):
            if verbosity >= 1:
                print "Updating %s..." % release

            # checkout_dir is shared for all languages.
            checkout_dir = Path(settings.DOCS_BUILD_ROOT).child(
                release.version)
            parent_build_dir = Path(settings.DOCS_BUILD_ROOT).child(
                release.lang, release.version)
            if not checkout_dir.exists():
                checkout_dir.mkdir(parents=True)
            if not parent_build_dir.exists():
                parent_build_dir.mkdir(parents=True)

            #
            # Update the release from SCM.
            #

            # Make an SCM checkout/update into the destination directory.
            # Do this dynamically in case we add other SCM later.
            getattr(self, 'update_%s' % release.scm)(release.scm_url,
                                                     checkout_dir)

            if release.docs_subdir:
                source_dir = checkout_dir.child(
                    *release.docs_subdir.split('/'))
            else:
                source_dir = checkout_dir

            if release.lang != 'en':
                # NOTE(review): checkout dir is 'django-docs-translation'
                # (singular) while the repo is
                # 'django-docs-translations.git' -- confirm intentional.
                scm_url = release.scm_url.replace(
                    'django.git', 'django-docs-translations.git')
                trans_dir = checkout_dir.child('django-docs-translation')
                if not trans_dir.exists():
                    trans_dir.mkdir()
                getattr(self, 'update_%s' % release.scm)(scm_url, trans_dir)
                if not source_dir.child('locale').exists():
                    # write_link presumably creates a symlink to the
                    # checked-out translations -- TODO confirm unipath
                    # semantics.
                    source_dir.child('locale').write_link(
                        trans_dir.child('translations'))
                # HACK: shell string with an unquoted path; safe only
                # because trans_dir comes from trusted settings.
                subprocess.call("cd %s && make translations" % trans_dir,
                                shell=True)

            #
            # Use Sphinx to build the release docs into JSON and HTML documents.
            #
            for builder in ('json', 'html'):
                # Wipe and re-create the build directory. See #18930.
                build_dir = parent_build_dir.child('_build', builder)
                if build_dir.exists():
                    shutil.rmtree(build_dir)
                build_dir.mkdir(parents=True)

                # "Shell out" (not exactly, but basically) to sphinx-build.
                if verbosity >= 2:
                    print "  building %s (%s -> %s)" % (builder, source_dir,
                                                        build_dir)
                sphinx.cmdline.main([
                    'sphinx-build',
                    '-b',
                    builder,
                    '-D',
                    'language=%s' % release.lang,
                    '-q',  # Be vewy qwiet
                    source_dir,  # Source file directory
                    build_dir,  # Destination directory
                ])

            #
            # Create a zip file of the HTML build for offline reading.
            # This gets moved into MEDIA_ROOT for downloading.
            #
            html_build_dir = parent_build_dir.child('_build', 'html')
            zipfile_name = 'django-docs-%s-%s.zip' % (release.version,
                                                      release.lang)
            zipfile_path = Path(settings.MEDIA_ROOT).child(
                'docs', zipfile_name)
            if not zipfile_path.parent.exists():
                zipfile_path.parent.mkdir(parents=True)
            if verbosity >= 2:
                print "  build zip (into %s)" % zipfile_path

            # Include regular files only, skipping Sphinx's .doctrees cache.
            def zipfile_inclusion_filter(f):
                return f.isfile() and '.doctrees' not in f.components()

            with closing(zipfile.ZipFile(zipfile_path, 'w')) as zf:
                for f in html_build_dir.walk(filter=zipfile_inclusion_filter):
                    zf.write(f, html_build_dir.rel_path_to(f))

            #
            # Copy the build results to the directory used for serving
            # the documentation in the least disruptive way possible.
            #
            # --link-dest hardlinks unchanged files against the fresh
            # build, so the serving tree updates with minimal churn.
            build_dir = parent_build_dir.child('_build')
            built_dir = parent_build_dir.child('_built')
            subprocess.check_call([
                'rsync', '--archive', '--delete', '--link-dest=' + build_dir,
                build_dir + '/', built_dir
            ])

            #
            # Rebuild the imported document list and search index.
            #
            if not kwargs['reindex']:
                continue

            if verbosity >= 2:
                print "  reindexing..."

            # Build a dict of {path_fragment: document_object}. We'll pop values
            # out of this dict as we go which'll make sure we know which
            # remaining documents need to be deleted (and unindexed) later on.
            documents = dict(
                (doc.path, doc) for doc in release.documents.all())

            # Walk the tree we've just built looking for ".fjson" documents
            # (just JSON, but Sphinx names them weirdly). Each one of those
            # documents gets a corresponding Document object created which
            # we'll then ask Sphinx to reindex.
            #
            # We have to be a bit careful to reverse-engineer the correct
            # relative path component, especially for "index" documents,
            # otherwise the search results will be incorrect.
            json_built_dir = parent_build_dir.child('_built', 'json')
            for built_doc in json_built_dir.walk():
                if built_doc.isfile() and built_doc.ext == '.fjson':

                    # Convert the built_doc path which is now an absolute
                    # path (i.e. "/home/docs/en/1.2/_built/ref/models.json")
                    # into a path component (i.e. "ref/models").
                    path = json_built_dir.rel_path_to(built_doc)
                    if path.stem == 'index':
                        path = path.parent
                    path = str(path.parent.child(path.stem))

                    # Read out the content and create a new Document object for
                    # it. We'll strip the HTML tags here (for want of a better
                    # place to do it).
                    with open(built_doc) as fp:
                        json_doc = json.load(fp)
                        try:
                            json_doc['body']  # Just to make sure it exists.
                            title = unescape_entities(
                                strip_tags(json_doc['title']))
                        except KeyError, ex:  # Python 2 except syntax
                            if verbosity >= 2:
                                print "Skipping: %s (no %s)" % (path,
                                                                ex.args[0])
                            continue

                    # Reuse the existing Document row if there is one,
                    # otherwise create a fresh (unsaved) one.
                    doc = documents.pop(path,
                                        Document(path=path, release=release))
                    doc.title = title
                    doc.save()
                    haystack.site.update_object(doc)

            # Clean up any remaining documents.
            for doc in documents.values():
                if verbosity >= 2:
                    print "Deleting:", doc
                haystack.site.remove_object(doc)
                doc.delete()
Ejemplo n.º 7
0
def run_benchmarks(control,
                   experiment,
                   benchmark_dir,
                   benchmarks,
                   trials,
                   vcs=None,
                   record_dir=None,
                   profile_dir=None,
                   continue_on_error=False):
    """Run each discovered benchmark against a control and an experiment
    Widgy tree and print/record the comparison.

    control/experiment -- tree paths, or branch names when *vcs* is set.
    benchmark_dir      -- directory scanned by discover_benchmarks().
    benchmarks         -- names to run; falsy means all.
    trials             -- number of trials per benchmark.
    vcs                -- VCS name; when set, branches are switched in the
                          current working tree between the two runs.
    record_dir         -- optional existing dir for per-benchmark JSON.
    profile_dir        -- optional dir for profiler output files.
    continue_on_error  -- swallow RuntimeError per benchmark and move on.

    Raises ValueError if record_dir is given but does not exist.
    """
    if benchmarks:
        print "Running benchmarks: %s" % " ".join(benchmarks)
    else:
        print "Running all benchmarks"

    if record_dir:
        record_dir = Path(record_dir).expand().absolute()
        if not record_dir.exists():
            raise ValueError('Recording directory "%s" does not exist' %
                             record_dir)
        print "Recording data to '%s'" % record_dir

    control_label = get_widgy_version(control, vcs=vcs)
    experiment_label = get_widgy_version(experiment, vcs=vcs)
    branch_info = "%s branch " % vcs if vcs else ""
    print "Control: Widgy %s (in %s%s)" % (control_label, branch_info, control)
    print "Experiment: Widgy %s (in %s%s)" % (experiment_label, branch_info,
                                              experiment)
    print

    # Calculate the subshell envs that we'll use to execute the
    # benchmarks in.
    if vcs:
        # With a VCS both runs share the working tree; a copy (not an
        # alias) keeps per-run variables like the profile file distinct.
        control_env = {
            'PYTHONPATH':
            '%s:%s' % (Path.cwd().absolute(), Path(benchmark_dir)),
        }
        experiment_env = control_env.copy()
    else:
        control_env = {
            'PYTHONPATH':
            '%s:%s' % (Path(control).absolute(), Path(benchmark_dir))
        }
        experiment_env = {
            'PYTHONPATH':
            '%s:%s' % (Path(experiment).absolute(), Path(benchmark_dir))
        }

    for benchmark in discover_benchmarks(benchmark_dir):
        if not benchmarks or benchmark.name in benchmarks:
            print "Running '%s' benchmark ..." % benchmark.name
            settings_mod = '%s.settings' % benchmark.name
            control_env['DJANGO_SETTINGS_MODULE'] = settings_mod
            experiment_env['DJANGO_SETTINGS_MODULE'] = settings_mod
            if profile_dir is not None:
                control_env['DJANGOBENCH_PROFILE_FILE'] = Path(
                    profile_dir, "con-%s" % benchmark.name)
                experiment_env['DJANGOBENCH_PROFILE_FILE'] = Path(
                    profile_dir, "exp-%s" % benchmark.name)
            try:
                if vcs: switch_to_branch(vcs, control)
                control_data = run_benchmark(benchmark, trials, control_env)
                if vcs: switch_to_branch(vcs, experiment)
                experiment_data = run_benchmark(benchmark, trials,
                                                experiment_env)
            except SkipBenchmark, reason:  # Python 2 except syntax
                print "Skipped: %s\n" % reason
                continue
            except RuntimeError, error:
                if continue_on_error:
                    print "Failed: %s\n" % error
                    continue
                # Not continuing: re-raise the original failure.
                raise

            options = argparse.Namespace(
                track_memory=False,
                diff_instrumentation=False,
                benchmark_name=benchmark.name,
                disable_timelines=True,
                control_label=control_label,
                experiment_label=experiment_label,
            )
            result = perf.CompareBenchmarkData(control_data, experiment_data,
                                               options)
            if record_dir:
                record_benchmark_results(
                    dest=record_dir.child('%s.json' % benchmark.name),
                    name=benchmark.name,
                    result=result,
                    control=control_label,
                    experiment=experiment_label,
                    control_data=control_data,
                    experiment_data=experiment_data,
                )
            print format_benchmark_result(result, len(control_data.runtimes))
            print
Ejemplo n.º 8
0
def run_benchmarks(control, experiment, benchmark_dir, benchmarks, trials, vcs=None, record_dir=None, profile_dir=None):
    if benchmarks:
        print "Running benchmarks: %s" % " ".join(benchmarks)
    else:
        print "Running all benchmarks"

    if record_dir:
        record_dir = Path(record_dir).expand().absolute()
        if not record_dir.exists():
            raise ValueError('Recording directory "%s" does not exist' % record_dir)
        print "Recording data to '%s'" % record_dir

    control_label = get_django_version(control, vcs=vcs)
    experiment_label = get_django_version(experiment, vcs=vcs)
    branch_info = "%s branch " % vcs if vcs else ""
    print "Control: Django %s (in %s%s)" % (control_label, branch_info, control)
    print "Experiment: Django %s (in %s%s)" % (experiment_label, branch_info, experiment)
    print

    # Calculate the subshell envs that we'll use to execute the
    # benchmarks in.
    if vcs:
        control_env = experiment_env = {
            'PYTHONPATH': '%s:%s' % (Path.cwd().absolute(), Path(benchmark_dir)),
        }
    else:
        control_env = {'PYTHONPATH': '%s:%s' % (Path(control).absolute(), Path(benchmark_dir))}
        experiment_env = {'PYTHONPATH': '%s:%s' % (Path(experiment).absolute(), Path(benchmark_dir))}

    for benchmark in discover_benchmarks(benchmark_dir):
        if not benchmarks or benchmark.name in benchmarks:
            print "Running '%s' benchmark ..." % benchmark.name
            settings_mod = '%s.settings' % benchmark.name
            control_env['DJANGO_SETTINGS_MODULE'] = settings_mod
            experiment_env['DJANGO_SETTINGS_MODULE'] = settings_mod
            if profile_dir is not None:
                control_env['DJANGOBENCH_PROFILE_FILE'] = Path(profile_dir, "con-%s" % benchmark.name)
                experiment_env['DJANGOBENCH_PROFILE_FILE'] = Path(profile_dir, "exp-%s" % benchmark.name)
            try:
                if vcs: switch_to_branch(vcs, control)
                control_data = run_benchmark(benchmark, trials, control_env)
                if vcs: switch_to_branch(vcs, experiment)
                experiment_data = run_benchmark(benchmark, trials, experiment_env)
            except SkipBenchmark, reason:
                print "Skipped: %s\n" % reason
                continue

            options = argparse.Namespace(
                track_memory = False,
                diff_instrumentation = False,
                benchmark_name = benchmark.name,
                disable_timelines = True,
                control_label = control_label,
                experiment_label = experiment_label,
            )
            result = perf.CompareBenchmarkData(control_data, experiment_data, options)
            if record_dir:
                record_benchmark_results(
                    dest = record_dir.child('%s.json' % benchmark.name),
                    name = benchmark.name,
                    result = result,
                    control = control_label,
                    experiment = experiment_label,
                    control_data = control_data,
                    experiment_data = experiment_data,
                )
            print format_benchmark_result(result, len(control_data.runtimes))
            print
Ejemplo n.º 9
0
    def handle_noargs(self, **kwargs):
        """Check out, build, zip, and optionally reindex the docs for
        every DocumentRelease.

        Reads 'verbosity' (int-ish, default 1) and 'reindex' (bool) from
        **kwargs. Builds JSON and HTML in-process via sphinx.cmdline.main,
        zips the HTML into MEDIA_ROOT, then refreshes Document rows and
        the haystack search index.
        """
        try:
            verbosity = int(kwargs['verbosity'])
        except (KeyError, TypeError, ValueError):
            verbosity = 1

        for release in DocumentRelease.objects.all():
            if verbosity >= 1:
                print "Updating %s..." % release

            destdir = Path(settings.DOCS_BUILD_ROOT).child(release.lang, release.version)
            if not destdir.exists():
                destdir.mkdir(parents=True)

            #
            # Update the release from SCM.
            #

            # Make an SCM checkout/update into the destination directory.
            # Do this dynamically in case we add other SCM later.
            getattr(self, 'update_%s' % release.scm)(release.scm_url, destdir)

            #
            # Use Sphinx to build the release docs into JSON and HTML documents.
            #
            for builder in ('json', 'html'):
                # Make the directory for the built files - sphinx-build doesn't
                # do it for us, apparently.
                build_dir = destdir.child('_build', builder)
                if not build_dir.exists():
                    build_dir.mkdir(parents=True)

                # "Shell out" (not exactly, but basically) to sphinx-build.
                if verbosity >= 2:
                    print "  building %s (into %s)" % (builder, build_dir)
                sphinx.cmdline.main(['sphinx-build',
                    '-b', builder,
                    '-q',              # Be vewy qwiet
                    destdir,           # Source file directory
                    build_dir,         # Destination directory
                ])

            #
            # Create a zip file of the HTML build for offline reading.
            # This gets moved into MEDIA_ROOT for downloading.
            #
            html_build_dir = destdir.child('_build', 'html')
            zipfile_name = 'django-docs-%s-%s.zip' % (release.version, release.lang)
            zipfile_path = Path(settings.MEDIA_ROOT).child('docs', zipfile_name)
            if not zipfile_path.parent.exists():
                zipfile_path.parent.mkdir(parents=True)
            if verbosity >= 2:
                print "  build zip (into %s)" % zipfile_path
            with closing(zipfile.ZipFile(zipfile_path, 'w')) as zf:
                for f in html_build_dir.walk(filter=Path.isfile):
                    zf.write(f, html_build_dir.rel_path_to(f))

            #
            # Rebuild the imported document list and search index.
            #
            if not kwargs['reindex']:
                continue

            if verbosity >= 2:
                print "  reindexing..."

            # Build a dict of {path_fragment: document_object}. We'll pop values
            # out of this dict as we go which'll make sure we know which
            # remaining documents need to be deleted (and unindexed) later on.
            documents = dict((doc.path, doc) for doc in release.documents.all())

            # Walk the tree we've just built looking for ".fjson" documents
            # (just JSON, but Sphinx names them weirdly). Each one of those
            # documents gets a corresponding Document object created which
            # we'll then ask Sphinx to reindex.
            #
            # We have to be a bit careful to reverse-engineer the correct
            # relative path component, especially for "index" documents,
            # otherwise the search results will be incorrect.
            json_build_dir = destdir.child('_build', 'json')
            for built_doc in json_build_dir.walk():
                if built_doc.isfile() and built_doc.ext == '.fjson':

                    # Convert the built_doc path which is now an absolute
                    # path (i.e. "/home/docs/en/1.2/_build/ref/models.json")
                    # into a path component (i.e. "ref/models").
                    path = json_build_dir.rel_path_to(built_doc)
                    if path.stem == 'index':
                        path = path.parent
                    path = str(path.parent.child(path.stem))

                    # Read out the content and create a new Document object for
                    # it. We'll strip the HTML tags here (for want of a better
                    # place to do it).
                    with open(built_doc) as fp:
                        json_doc = json.load(fp)
                        try:
                            json_doc['body']  # Just to make sure it exists.
                            title = strip_tags(json_doc['title'])
                        except KeyError, ex:  # Python 2 except syntax
                            if verbosity >= 2:
                                print "Skipping: %s (no %s)" % (path, ex.args[0])
                            continue

                    # Reuse the existing Document row if there is one,
                    # otherwise create a fresh (unsaved) one.
                    doc = documents.pop(path, Document(path=path, release=release))
                    doc.title = title
                    doc.save()
                    haystack.site.update_object(doc)

            # Clean up any remaining documents.
            for doc in documents.values():
                if verbosity >= 2:
                    print "Deleting:", doc
                haystack.site.remove_object(doc)
                doc.delete()
Ejemplo n.º 10
0
    def handle_noargs(self, **kwargs):
        """
        Build (and optionally reindex) the documentation for every
        DocumentRelease, newest version first.

        Pipeline per release: SCM checkout/update -> sphinx-build ("json"
        and "html" builders) -> zip the HTML output for offline download
        into STATIC_ROOT -> rsync "_build" to "_built" for serving ->
        optionally refresh the Document rows and the haystack search index
        from the built ".fjson" files.

        kwargs:
            verbosity -- coerced to int; defaults to 1 when missing or
                not a valid integer.
            reindex -- when falsy, the search-index rebuild is skipped
                (the docs are still built and published).
        """
        try:
            verbosity = int(kwargs["verbosity"])
        except (KeyError, TypeError, ValueError):
            # Missing or non-numeric verbosity: fall back to the normal level.
            verbosity = 1

        # Somehow, bizarely, there's a bug in Sphinx such that if I try to
        # build 1.0 before other versions, things fail in weird ways. However,
        # building newer versions first works. I suspect Sphinx is hanging onto
        # some global state. Anyway, we can work around it by making sure that
        # "dev" builds before "1.0". This is ugly, but oh well.
        for release in DocumentRelease.objects.order_by("-version"):
            if verbosity >= 1:
                print "Updating %s..." % release

            # Per-release working directory, e.g. <DOCS_BUILD_ROOT>/en/1.4.
            # NOTE(review): Path appears to be a Unipath-style object
            # (child/walk/rel_path_to) -- confirm against the imports.
            destdir = Path(settings.DOCS_BUILD_ROOT).child(release.lang, release.version)
            if not destdir.exists():
                destdir.mkdir(parents=True)

            #
            # Update the release from SCM.
            #

            # Make an SCM checkout/update into the destination directory.
            # Do this dynamically in case we add other SCM later.
            getattr(self, "update_%s" % release.scm)(release.scm_url, destdir)

            #
            # Use Sphinx to build the release docs into JSON and HTML documents.
            #
            if release.docs_subdir:
                # The Sphinx sources may live in a subdirectory of the
                # checkout (stored as a "/"-separated fragment).
                source_dir = destdir.child(*release.docs_subdir.split("/"))
            else:
                source_dir = destdir

            for builder in ("json", "html"):
                # Wipe and re-create the build directory. See #18930.
                build_dir = destdir.child("_build", builder)
                if build_dir.exists():
                    shutil.rmtree(build_dir)
                build_dir.mkdir(parents=True)

                # "Shell out" (not exactly, but basically) to sphinx-build.
                if verbosity >= 2:
                    print "  building %s (%s -> %s)" % (builder, source_dir, build_dir)
                sphinx.cmdline.main(
                    [
                        "sphinx-build",
                        "-b",
                        builder,
                        "-q",  # Be vewy qwiet
                        source_dir,  # Source file directory
                        build_dir,  # Destination directory
                    ]
                )

            #
            # Create a zip file of the HTML build for offline reading.
            # This gets moved into STATIC_ROOT for downloading.
            #
            html_build_dir = destdir.child("_build", "html")
            zipfile_name = "django-docs-%s-%s.zip" % (release.version, release.lang)
            zipfile_path = Path(settings.STATIC_ROOT).child("docs", zipfile_name)
            if not zipfile_path.parent.exists():
                zipfile_path.parent.mkdir(parents=True)
            if verbosity >= 2:
                print "  build zip (into %s)" % zipfile_path

            # Exclude Sphinx's pickled environment (".doctrees") -- only real
            # output files belong in the offline archive.
            def zipfile_inclusion_filter(f):
                return f.isfile() and ".doctrees" not in f.components()

            with closing(zipfile.ZipFile(zipfile_path, "w")) as zf:
                for f in html_build_dir.walk(filter=zipfile_inclusion_filter):
                    # Store each entry under its path relative to the HTML root.
                    zf.write(f, html_build_dir.rel_path_to(f))

            #
            # Copy the build results to the directory used for serving
            # the documentation in the least disruptive way possible.
            #
            # --link-dest hard-links files that are unchanged relative to
            # _build, so the publish step is cheap and close to atomic from
            # the web server's point of view.
            build_dir = destdir.child("_build")
            built_dir = destdir.child("_built")
            subprocess.check_call(
                ["rsync", "--archive", "--delete", "--link-dest=" + build_dir, build_dir + "/", built_dir]
            )

            #
            # Rebuild the imported document list and search index.
            #
            if not kwargs["reindex"]:
                continue

            if verbosity >= 2:
                print "  reindexing..."

            # Build a dict of {path_fragment: document_object}. We'll pop values
            # out of this dict as we go which'll make sure we know which
            # remaining documents need to be deleted (and unindexed) later on.
            documents = dict((doc.path, doc) for doc in release.documents.all())

            # Walk the tree we've just built looking for ".fjson" documents
            # (just JSON, but Sphinx names them weirdly). Each one of those
            # documents gets a corresponding Document object created which
            # we'll then ask Sphinx to reindex.
            #
            # We have to be a bit careful to reverse-engineer the correct
            # relative path component, especially for "index" documents,
            # otherwise the search results will be incorrect.
            json_built_dir = destdir.child("_built", "json")
            for built_doc in json_built_dir.walk():
                if built_doc.isfile() and built_doc.ext == ".fjson":

                    # Convert the built_doc path which is now an absolute
                    # path (i.e. "/home/docs/en/1.2/_built/ref/models.json")
                    # into a path component (i.e. "ref/models").
                    path = json_built_dir.rel_path_to(built_doc)
                    if path.stem == "index":
                        path = path.parent
                    path = str(path.parent.child(path.stem))

                    # Read out the content and create a new Document object for
                    # it. We'll strip the HTML tags here (for want of a better
                    # place to do it).
                    with open(built_doc) as fp:
                        json_doc = json.load(fp)
                        try:
                            json_doc["body"]  # Just to make sure it exists.
                            title = unescape_entities(strip_tags(json_doc["title"]))
                        except KeyError, ex:
                            # Pages without a body or title (e.g. search /
                            # genindex stubs) aren't worth indexing.
                            if verbosity >= 2:
                                print "Skipping: %s (no %s)" % (path, ex.args[0])
                            continue

                    # Reuse the existing Document row if there is one;
                    # otherwise create a fresh one for this release.
                    doc = documents.pop(path, Document(path=path, release=release))
                    doc.title = title
                    doc.save()
                    haystack.site.update_object(doc)

            # Clean up any remaining documents.
            for doc in documents.values():
                if verbosity >= 2:
                    print "Deleting:", doc
                haystack.site.remove_object(doc)
                doc.delete()
Ejemplo n.º 11
0
def run_benchmarks(control, experiment, benchmark_dir, benchmarks, trials,
                   record_dir, profile_dir):
    """
    Benchmark an "experiment" Django tree against a "control" tree.

    Each tree is installed into its own freshly created virtualenv (via
    setup_env), then every benchmark discovered under ``benchmark_dir`` --
    optionally filtered by ``benchmarks`` -- is run ``trials`` times against
    both interpreters and the two result sets are compared with
    ``perf.CompareBenchmarkData``.

    Arguments:
        control, experiment -- labels/specs handed to setup_env and used in
            the printed and recorded results.
        benchmark_dir -- directory scanned by discover_benchmarks; also put
            on PYTHONPATH for the benchmark subprocesses.
        benchmarks -- iterable of benchmark names to run; falsy means "all".
        trials -- number of runs per benchmark per environment.
        record_dir -- optional; when given it must already exist, and one
            "<name>.json" result file per benchmark is written into it.
        profile_dir -- optional; when not None, per-benchmark profile output
            paths are exported via DJANGOBENCH_PROFILE_FILE.

    Raises:
        ValueError -- if record_dir is supplied but does not exist.
    """
    if benchmarks:
        print "Running benchmarks: %s" % " ".join(benchmarks)
    else:
        print "Running all benchmarks"

    if record_dir:
        record_dir = Path(record_dir).expand().absolute()
        if not record_dir.exists():
            raise ValueError('Recording directory "%s" does not exist' % record_dir)
        print "Recording data to '%s'" % record_dir


    print "Control: %s" % control
    print "Experiment: %s" % experiment
    print

    # One throwaway virtualenv per Django tree so the two versions can't
    # contaminate each other.
    control_env_dir = tempfile.mkdtemp()
    experiment_env_dir = tempfile.mkdtemp()

    # create envs
    virtualenv.create_environment(control_env_dir, False)
    virtualenv.create_environment(experiment_env_dir, False)

    # setup_env presumably installs the given Django tree into the env and
    # returns the env's python interpreter path -- TODO confirm.
    control_python = setup_env(control_env_dir, control)
    experiment_python = setup_env(experiment_env_dir, experiment)

    # PYTHONPATH for the benchmark subprocesses: cwd, the benchmark dir, and
    # this package's parent (so the runner's own modules are importable).
    control_env = {
        'PYTHONPATH': '%s:%s:%s' % (Path.cwd().absolute(), Path(benchmark_dir), Path(__file__).parent.parent.absolute()),
    }
    experiment_env = control_env.copy()

    for benchmark in discover_benchmarks(benchmark_dir):
        if not benchmarks or benchmark.name in benchmarks:
            print "Running '%s' benchmark ..." % benchmark.name
            # Each benchmark ships its own settings module.
            settings_mod = '%s.settings' % benchmark.name
            control_env['DJANGO_SETTINGS_MODULE'] = settings_mod
            experiment_env['DJANGO_SETTINGS_MODULE'] = settings_mod
            if profile_dir is not None:
                # "con-"/"exp-" prefixes keep the two profiles apart.
                control_env['DJANGOBENCH_PROFILE_FILE'] = Path(profile_dir, "con-%s" % benchmark.name)
                experiment_env['DJANGOBENCH_PROFILE_FILE'] = Path(profile_dir, "exp-%s" % benchmark.name)
            try:
                control_data = run_benchmark(benchmark, trials, control_env, control_python)
                experiment_data = run_benchmark(benchmark, trials, experiment_env, experiment_python)
            except SkipBenchmark, reason:
                # A benchmark may opt out (e.g. unsupported feature); move on.
                print "Skipped: %s\n" % reason
                continue

            # Options shaped for perf's comparison machinery.
            options = argparse.Namespace(
                track_memory = False,
                diff_instrumentation = False,
                benchmark_name = benchmark.name,
                disable_timelines = True,
                control_label = control,
                experiment_label = experiment,
            )
            result = perf.CompareBenchmarkData(control_data, experiment_data, options)
            if record_dir:
                record_benchmark_results(
                    dest = record_dir.child('%s.json' % benchmark.name),
                    name = benchmark.name,
                    result = result,
                    control = control,
                    experiment = experiment,
                    control_data = control_data,
                    experiment_data = experiment_data,
                )
            print format_benchmark_result(result, len(control_data.runtimes))
            print
Ejemplo n.º 12
0
    def handle_noargs(self, **kwargs):
        """
        Build the docs (JSON and HTML) for every DocumentRelease and
        optionally rebuild the search index from the JSON output.

        Pipeline per release: SCM checkout/update -> sphinx-build ("json"
        and "html" builders, in place under "_build") -> zip the HTML
        output into MEDIA_ROOT for offline download -> optionally refresh
        the Document rows and the haystack search index from the built
        ".fjson" files.

        kwargs:
            verbosity -- coerced to int; defaults to 1 when missing or
                not a valid integer.
            reindex -- when falsy, the search-index rebuild is skipped.
        """
        try:
            verbosity = int(kwargs['verbosity'])
        except (KeyError, TypeError, ValueError):
            # Missing or non-numeric verbosity: fall back to the normal level.
            verbosity = 1

        for release in DocumentRelease.objects.all():
            if verbosity >= 1:
                print "Updating %s..." % release

            # Per-release working directory, e.g. <DOCS_BUILD_ROOT>/en/1.4.
            # NOTE(review): Path appears to be a Unipath-style object
            # (child/walk/rel_path_to) -- confirm against the imports.
            destdir = Path(settings.DOCS_BUILD_ROOT).child(release.lang, release.version)
            if not destdir.exists():
                destdir.mkdir(parents=True)

            #
            # Update the release from SCM.
            #

            # Make an SCM checkout/update into the destination directory.
            # Do this dynamically in case we add other SCM later.
            getattr(self, 'update_%s' % release.scm)(release.scm_url, destdir)

            #
            # Use Sphinx to build the release docs into JSON and HTML documents.
            #
            for builder in ('json', 'html'):
                # Make the directory for the built files - sphinx-build doesn't
                # do it for us, apparently.
                build_dir = destdir.child('_build', builder)
                if not build_dir.exists():
                    build_dir.mkdir(parents=True)

                # "Shell out" (not exactly, but basically) to sphinx-build.
                if verbosity >= 2:
                    print "  building %s (into %s)" % (builder, build_dir)
                sphinx.cmdline.main(['sphinx-build',
                    '-b', builder,
                    '-q',              # Be vewy qwiet
                    destdir,           # Source file directory
                    build_dir,         # Destination directory
                ])

            #
            # Create a zip file of the HTML build for offline reading.
            # This gets moved into MEDIA_ROOT for downloading.
            #
            html_build_dir = destdir.child('_build', 'html')
            zipfile_name = 'django-docs-%s-%s.zip' % (release.version, release.lang)
            zipfile_path = Path(settings.MEDIA_ROOT).child('docs', zipfile_name)
            if not zipfile_path.parent.exists():
                zipfile_path.parent.mkdir(parents=True)
            if verbosity >= 2:
                print "  build zip (into %s)" % zipfile_path

            # Exclude Sphinx's pickled environment (".doctrees") -- only real
            # output files belong in the offline archive.
            def zipfile_inclusion_filter(f):
                return f.isfile() and '.doctrees' not in f.components()

            with closing(zipfile.ZipFile(zipfile_path, 'w')) as zf:
                for f in html_build_dir.walk(filter=zipfile_inclusion_filter):
                    # Store each entry under its path relative to the HTML root.
                    zf.write(f, html_build_dir.rel_path_to(f))

            #
            # Rebuild the imported document list and search index.
            #
            if not kwargs['reindex']:
                continue

            if verbosity >= 2:
                print "  reindexing..."

            # Build a dict of {path_fragment: document_object}. We'll pop values
            # out of this dict as we go which'll make sure we know which
            # remaining documents need to be deleted (and unindexed) later on.
            documents = dict((doc.path, doc) for doc in release.documents.all())

            # Walk the tree we've just built looking for ".fjson" documents
            # (just JSON, but Sphinx names them weirdly). Each one of those
            # documents gets a corresponding Document object created which
            # we'll then ask Sphinx to reindex.
            #
            # We have to be a bit careful to reverse-engineer the correct
            # relative path component, especially for "index" documents,
            # otherwise the search results will be incorrect.
            json_build_dir = destdir.child('_build', 'json')
            for built_doc in json_build_dir.walk():
                if built_doc.isfile() and built_doc.ext == '.fjson':

                    # Convert the built_doc path which is now an absolute
                    # path (i.e. "/home/docs/en/1.2/_build/ref/models.json")
                    # into a path component (i.e. "ref/models").
                    path = json_build_dir.rel_path_to(built_doc)
                    if path.stem == 'index':
                        path = path.parent
                    path = str(path.parent.child(path.stem))

                    # Read out the content and create a new Document object for
                    # it. We'll strip the HTML tags here (for want of a better
                    # place to do it).
                    with open(built_doc) as fp:
                        json_doc = json.load(fp)
                        try:
                            json_doc['body']  # Just to make sure it exists.
                            title = strip_tags(json_doc['title'])
                        except KeyError, ex:
                            # Pages without a body or title (e.g. search /
                            # genindex stubs) aren't worth indexing.
                            if verbosity >= 2:
                                print "Skipping: %s (no %s)" % (path, ex.args[0])
                            continue

                    # Reuse the existing Document row if there is one;
                    # otherwise create a fresh one for this release.
                    doc = documents.pop(path, Document(path=path, release=release))
                    doc.title = title
                    doc.save()
                    haystack.site.update_object(doc)

            # Clean up any remaining documents.
            for doc in documents.values():
                if verbosity >= 2:
                    print "Deleting:", doc
                haystack.site.remove_object(doc)
                doc.delete()
Ejemplo n.º 13
0
    def handle_noargs(self, **kwargs):
        """
        Build the JSON docs for every DocumentRelease and rebuild the
        search index from the output.

        Pipeline per release: SCM checkout/update -> sphinx-build with the
        "json" builder into "_build/json" -> refresh the Document rows and
        the haystack search index from the built ".fjson" files. Unlike the
        other variants, reindexing here is unconditional (no "reindex" flag
        is consulted).

        kwargs:
            verbosity -- coerced to int; defaults to 1 when missing or
                not a valid integer.
        """
        try:
            verbosity = int(kwargs["verbosity"])
        except (KeyError, TypeError, ValueError):
            # Missing or non-numeric verbosity: fall back to the normal level.
            verbosity = 1

        for release in DocumentRelease.objects.all():
            if verbosity >= 1:
                print "Updating %s..." % release

            # Per-release working directory, e.g. <DOCS_BUILD_ROOT>/en/1.4.
            # NOTE(review): Path appears to be a Unipath-style object
            # (child/walk/rel_path_to) -- confirm against the imports.
            destdir = Path(settings.DOCS_BUILD_ROOT).child(release.lang, release.version)
            if not destdir.exists():
                destdir.mkdir(parents=True)

            #
            # Update the release from SCM.
            #

            # Make an SCM checkout/update into the destination directory.
            # Do this dynamically in case we add other SCM later.
            getattr(self, "update_%s" % release.scm)(release.scm_url, destdir)

            #
            # Use Sphinx to build the release docs into JSON documents.
            #

            # Make the directory for the JSON files - sphinx-build doesn't
            # do it for us, apparently.
            json_build_dir = destdir.child("_build", "json")
            if not json_build_dir.exists():
                json_build_dir.mkdir(parents=True)

            # "Shell out" (not exactly, but basically) to sphinx-build.
            sphinx.cmdline.main(
                [
                    "sphinx-build",
                    "-b",
                    "json",  # Use the JSON builder
                    "-q",  # Be vewy qwiet
                    destdir,  # Source file directory
                    json_build_dir,  # Destination directory
                ]
            )

            #
            # Rebuild the imported document list and search index.
            #

            # Build a dict of {path_fragment: document_object}. We'll pop values
            # out of this dict as we go which'll make sure we know which
            # remaining documents need to be deleted (and unindexed) later on.
            documents = dict((doc.path, doc) for doc in release.documents.all())

            # Walk the tree we've just built looking for ".fjson" documents
            # (just JSON, but Sphinx names them weirdly). Each one of those
            # documents gets a corresponding Document object created which
            # we'll then ask Sphinx to reindex.
            #
            # We have to be a bit careful to reverse-engineer the correct
            # relative path component, especially for "index" documents,
            # otherwise the search results will be incorrect.
            for built_doc in json_build_dir.walk():
                if built_doc.isfile() and built_doc.ext == ".fjson":

                    # Convert the built_doc path which is now an absolute
                    # path (i.e. "/home/docs/en/1.2/_build/ref/models.json")
                    # into a path component (i.e. "ref/models").
                    path = json_build_dir.rel_path_to(built_doc)
                    if path.stem == "index":
                        path = path.parent
                    path = str(path.parent.child(path.stem))

                    # Read out the content and create a new Document object for
                    # it. We'll strip the HTML tags here (for want of a better
                    # place to do it).
                    with open(built_doc) as fp:
                        json_doc = json.load(fp)
                        try:
                            ignored = json_doc["body"]  # Just to make sure it exists.
                            title = strip_tags(json_doc["title"])
                        except KeyError, ex:
                            # Pages without a body or title (e.g. search /
                            # genindex stubs) aren't worth indexing.
                            if verbosity >= 2:
                                print "Skipping: %s (no %s)" % (path, ex.args[0])
                            continue

                    # Reuse the existing Document row if there is one;
                    # otherwise create a fresh one for this release.
                    doc = documents.pop(path, Document(path=path, release=release))
                    doc.title = title
                    if verbosity >= 2:
                        print "Indexing:", doc
                    doc.save()
                    haystack.site.update_object(doc)

            # Clean up any remaining documents.
            for doc in documents.values():
                if verbosity >= 2:
                    print "Deleting:", doc
                haystack.site.remove_object(doc)
                doc.delete()