Example #1
File: ivy_utils.py Project: aoen/pants
  def symlink_cachepath(ivy_home, inpath, symlink_dir, outpath):
    """Symlinks all paths listed in inpath that are under ivy_home into symlink_dir.

    Preserves all other paths. Writes the resulting paths to outpath.
    Returns a map of path -> symlink to that path.
    """
    safe_mkdir(symlink_dir)
    with safe_open(inpath, 'r') as infile:
      paths = filter(None, infile.read().strip().split(os.pathsep))
    new_paths = []
    for path in paths:
      if not path.startswith(ivy_home):
        new_paths.append(path)
        continue
      symlink = os.path.join(symlink_dir, os.path.relpath(path, ivy_home))
      try:
        os.makedirs(os.path.dirname(symlink))
      except OSError as e:
        if e.errno != errno.EEXIST:
          raise
      # Note: The try blocks cannot be combined. It may be that the dir exists but the link doesn't.
      try:
        os.symlink(path, symlink)
      except OSError as e:
        # We don't delete and recreate the symlink, as this may break concurrently executing code.
        if e.errno != errno.EEXIST:
          raise
      new_paths.append(symlink)
    with safe_open(outpath, 'w') as outfile:
      outfile.write(':'.join(new_paths))
    symlink_map = dict(zip(paths, new_paths))
    return symlink_map
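
The two separate try blocks are the heart of this example: each of makedirs and symlink is allowed to fail with EEXIST, and nothing is ever deleted and recreated, since concurrent processes may already be using the link. A minimal standalone sketch of that pattern (the helper name ensure_symlink is hypothetical, not part of pants):

import errno
import os

def ensure_symlink(target, link_path):
  """Creates link_path -> target, tolerating concurrent creation."""
  link_dir = os.path.dirname(link_path)
  if link_dir:
    try:
      os.makedirs(link_dir)
    except OSError as e:
      if e.errno != errno.EEXIST:
        raise
  try:
    os.symlink(target, link_path)
  except OSError as e:
    # As in the example above: never delete and recreate the link, as that
    # may break concurrently executing readers.
    if e.errno != errno.EEXIST:
      raise
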
Example #2
  def execute(self, targets):
    java_targets = filter(JavaCompile._is_java, targets)
    if java_targets:
      with self.context.state('classpath', []) as cp:
        for conf in self._confs:
          cp.insert(0, (conf, self._output_dir))

        with self.changed(java_targets, invalidate_dependants=True) as changed:
          bases, sources_by_target, processors, fingerprint = self.calculate_sources(changed)
          if sources_by_target:
            classpath = [jar for conf, jar in cp if conf in self._confs]
            result = self.compile(classpath, bases, sources_by_target, fingerprint)
            if result != 0:
              raise TaskError('%s returned %d' % (self._main, result))

            if processors:
              if os.path.exists(self._processor_service_info_file):
                with safe_open(self._processor_service_info_file, 'r') as f:
                  for processor in f:
                    processors.add(processor.strip())
              with safe_open(self._processor_service_info_file, 'w') as f:
                for processor in processors:
                  f.write('%s\n' % processor)

      if self.context.products.isrequired('classes'):
        genmap = self.context.products.get('classes')
        classes_by_target = SunCompiler.findclasses(self._output_dir, targets)
        for target, classes in classes_by_target.items():
          genmap.add(target, self._output_dir, classes)
Example #3
    def _spawn_nailgun_server(self, workunit):
        self.context.log.debug("No ng server found, spawning...")

        with _safe_open(self._ng_out, "w"):
            pass  # truncate

        pid = os.fork()
        if pid != 0:
            # In the parent: block on ng being up for connections
            return self._await_nailgun_server(workunit)

        # NOTE: Don't use self.context.log or self.context.new_workunit here.
        # They use threadlocal state, which interacts poorly with fork().
        os.setsid()
        in_fd = open("/dev/null", "r")
        out_fd = safe_open(self._ng_out, "w")
        err_fd = safe_open(self._ng_err, "w")
        args = ["java"]
        if self._ng_server_args:
            args.extend(self._ng_server_args)
        args.append(NailgunTask.PANTS_NG_ARG)
        args.append(self._identifier_arg)
        ng_classpath = os.pathsep.join(binary_util.profile_classpath(self._nailgun_profile))
        args.extend(["-cp", ng_classpath, "com.martiansoftware.nailgun.NGServer", ":0"])
        s = " ".join(args)

        with binary_util.safe_classpath():
            subprocess.Popen(args, stdin=in_fd, stdout=out_fd, stderr=err_fd, close_fds=True, cwd=get_buildroot())
            # Prevents finally blocks being executed, unlike sys.exit(). We don't want to execute finally
            # blocks because we might, e.g., clean up tempfiles that the parent still needs.
            os._exit(0)
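
The closing comment is the subtle part: in the forked child, os._exit(0) terminates immediately without running finally blocks or atexit handlers, so cleanup registered by the parent (e.g. tempfile removal) is not executed a second time. A minimal sketch of the same fork-and-detach shape, independent of pants:

import os
import subprocess

def spawn_detached(args, log_path):
  pid = os.fork()
  if pid != 0:
    os.waitpid(pid, 0)  # reap the intermediate child; the grandchild stays detached
    return
  # Child: start a new session, launch the real process, then exit hard.
  os.setsid()
  with open(log_path, 'w') as log:
    subprocess.Popen(args, stdout=log, stderr=log, close_fds=True)
  os._exit(0)  # unlike sys.exit(), skips finally blocks and atexit handlers
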
Example #4
File: test_git.py Project: aoen/pants
  def setUpClass(cls):
    cls.origin = safe_mkdtemp()
    with pushd(cls.origin):
      subprocess.check_call(['git', 'init', '--bare'])

    cls.gitdir = safe_mkdtemp()
    cls.worktree = safe_mkdtemp()

    cls.readme_file = os.path.join(cls.worktree, 'README')

    with environment_as(GIT_DIR=cls.gitdir, GIT_WORK_TREE=cls.worktree):
      cls.init_repo('depot', cls.origin)

      touch(cls.readme_file)
      subprocess.check_call(['git', 'add', 'README'])
      subprocess.check_call(['git', 'commit', '-am', 'initial commit with decode -> \x81b'])
      subprocess.check_call(['git', 'tag', 'first'])
      subprocess.check_call(['git', 'push', '--tags', 'depot', 'master'])
      subprocess.check_call(['git', 'branch', '--set-upstream', 'master', 'depot/master'])

      with safe_open(cls.readme_file, 'w') as readme:
        readme.write('Hello World.')
      subprocess.check_call(['git', 'commit', '-am', 'Update README.'])

    cls.clone2 = safe_mkdtemp()
    with pushd(cls.clone2):
      cls.init_repo('origin', cls.origin)
      subprocess.check_call(['git', 'pull', '--tags', 'origin', 'master:master'])

      with safe_open(os.path.realpath('README'), 'a') as readme:
        readme.write('--')
      subprocess.check_call(['git', 'commit', '-am', 'Update README 2.'])
      subprocess.check_call(['git', 'push', '--tags', 'origin', 'master'])

    cls.git = Git(gitdir=cls.gitdir, worktree=cls.worktree)
Example #5
  def process(self, outdir, base, source, standalone, url_builder, get_config, css=None):
    def parse_url(spec):
      match = MarkdownToHtml.PANTS_LINK.match(spec)
      if match:
        page = Target.get(Address.parse(get_buildroot(), match.group(1)))
        if not page:
          raise TaskError('Invalid link %s' % match.group(1))
        alias, url = url_builder(page, config=get_config(page))
        return alias, url
      else:
        return spec, spec

    def build_url(label):
      components = label.split('|', 1)
      if len(components) == 1:
        return parse_url(label.strip())
      else:
        alias, link = components
        _, url = parse_url(link.strip())
        return alias, url

    wikilinks = WikilinksExtension(build_url)

    path, ext = os.path.splitext(source)
    with safe_open(os.path.join(outdir, path + '.html'), 'w') as output:
      with open(os.path.join(get_buildroot(), base, source), 'r') as input:
        md_html = markdown.markdown(
          input.read(),
          extensions=['codehilite', 'extra', 'toc', wikilinks]
        )
        if standalone:
          if css:
            css_relpath = os.path.relpath(css, outdir)
            out_relpath = os.path.dirname(source)
            link_relpath = os.path.relpath(css_relpath, out_relpath)
            css = '<link rel="stylesheet" type="text/css" href="%s"/>' % link_relpath
          html = textwrap.dedent('''
          <html>
            <head>
              %s
            </head>
            <body>
          <!-- generated by pants! -->
          %s
            </body>
          </html>
          ''').strip() % (css or '', md_html)
          output.write(html)
        else:
          if css:
            with safe_open(css) as fd:
              output.write(textwrap.dedent('''
              <style type="text/css">
              %s
              </style>
              ''').strip() % fd.read())
              output.write('\n')
          output.write(md_html)
        return output.name
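
The conversion itself is just the markdown package's top-level call; a minimal sketch of the same invocation, without the wiki-link extension:

import markdown

html = markdown.markdown(
  '# Title\n\nSome *emphasis* and `code`.',
  extensions=['codehilite', 'extra', 'toc']
)
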
Example #6
 def _prepare_fork(self):
   user, current_user = self._getpwuid()
   uid, gid = user.pw_uid, user.pw_gid
   self._fork_time = self._platform.clock().time()
   self._setup_ckpt()
   self._stdout = safe_open(self._pathspec.with_filename('stdout').getpath('process_logdir'), "w")
   self._stderr = safe_open(self._pathspec.with_filename('stderr').getpath('process_logdir'), "w")
   os.chown(self._stdout.name, user.pw_uid, user.pw_gid)
   os.chown(self._stderr.name, user.pw_uid, user.pw_gid)
Example #7
File: process.py Project: rosmo/aurora
 def _prepare_fork(self):
   user, current_user = self._getpwuid()
   if self._user:
     if user != current_user and os.geteuid() != 0:
       raise self.PermissionError('Must be root to run processes as other users!')
   uid, gid = user.pw_uid, user.pw_gid
   self._fork_time = self._platform.clock().time()
   self._setup_ckpt()
   self._stdout = safe_open(self._pathspec.with_filename('stdout').getpath('process_logdir'), "a")
   self._stderr = safe_open(self._pathspec.with_filename('stderr').getpath('process_logdir'), "a")
   os.chown(self._stdout.name, user.pw_uid, user.pw_gid)
   os.chown(self._stderr.name, user.pw_uid, user.pw_gid)
Example #8
    def stage_artifacts(target, jar, version, changelog, confs=None, synth_target=None):
      def artifact_path(name=None, suffix='', extension='jar', artifact_ext=''):
        return os.path.join(self.outdir, jar.org, jar.name + artifact_ext,
                            '%s%s-%s%s.%s' % (
                              (name or jar.name),
                              artifact_ext if name != 'ivy' else '',
                              version,
                              suffix,
                              extension
                            ))

      def get_pushdb(target):
        return get_db(target)[0]

      with safe_open(artifact_path(suffix='-CHANGELOG', extension='txt'), 'w') as changelog_file:
        changelog_file.write(changelog)
      ivyxml = artifact_path(name='ivy', extension='xml')
      IvyWriter(get_pushdb).write(target, ivyxml, confs)
      PomWriter(get_pushdb).write(target, artifact_path(extension='pom'))

      idl_ivyxml = None
      if synth_target:
        changelog_path = artifact_path(suffix='-CHANGELOG', extension='txt', artifact_ext='-only')
        with safe_open(changelog_path, 'w') as changelog_file:
          changelog_file.write(changelog)
        idl_ivyxml = artifact_path(name='ivy', extension='xml', artifact_ext='-only')
        # use idl publication spec in ivy for idl artifact
        IvyWriter(get_pushdb).write(synth_target, idl_ivyxml, ['idl'], synth=True)
        PomWriter(get_pushdb).write(synth_target,
                                    artifact_path(extension='pom', artifact_ext='-only'),
                                    synth=True)

      def copy(tgt, typename, suffix='', artifact_ext=''):
        genmap = self.context.products.get(typename)
        mapping = genmap.get(tgt)
        if not mapping:
          print('no mapping for %s' % tgt)
        else:
          for basedir, jars in mapping.items():
            for artifact in jars:
              path = artifact_path(suffix=suffix, artifact_ext=artifact_ext)
              shutil.copy(os.path.join(basedir, artifact), path)

      copy(target, typename='jars')
      copy(target, typename='source_jars', suffix='-sources')
      if synth_target:
        copy(synth_target, typename='idl_jars', suffix='-idl', artifact_ext='-only')

      if is_java(target):
        copy(target, typename='javadoc_jars', suffix='-javadoc')


      return ivyxml, idl_ivyxml
Example #9
  def execute(self):
    pages = []
    targets = self.context.targets()
    for target in targets:
      if isinstance(target, Page):
        for wiki_artifact in target.payload.provides:
          pages.append((target, wiki_artifact))

    urls = list()

    genmap = self.context.products.get('wiki_html')
    for page, wiki_artifact in pages:
      html_info = genmap.get((wiki_artifact, page))
      if len(html_info) > 1:
        raise TaskError('Unexpected resources for %s: %s' % (page, html_info))
      basedir, htmls = html_info.items()[0]
      if len(htmls) != 1:
        raise TaskError('Unexpected resources for %s: %s' % (page, htmls))
      with safe_open(os.path.join(basedir, htmls[0])) as contents:
        url = self.publish_page(
          page.address,
          wiki_artifact.config['space'],
          wiki_artifact.config['title'],
          contents.read(),
          # Default to none if not present in the hash.
          parent=wiki_artifact.config.get('parent')
        )
        if url:
          urls.append(url)
          self.context.log.info('Published %s to %s' % (page, url))

    if self.open and urls:
      binary_util.ui_open(*urls)
Example #10
  def execute_single_compilation(self, versioned_targets, cp):
    # TODO: Use the artifact cache.

    depfile = self.create_depfile_path(versioned_targets.targets)

    if not versioned_targets.valid:
      self.merge_depfile(versioned_targets)  # Get what we can from previous builds.
      self.context.log.info('Compiling targets %s' % str(versioned_targets.targets))
      sources_by_target, processors, fingerprint = self.calculate_sources(versioned_targets.targets)
      if sources_by_target:
        sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
        if not sources:
          self.context.log.warn('Skipping java compile for targets with no sources:\n  %s' %
                                '\n  '.join(str(t) for t in sources_by_target.keys()))
        else:
          classpath = [jar for conf, jar in cp if conf in self._confs]
          result = self.compile(classpath, sources, fingerprint, depfile)
          if result != 0:
            default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
            raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))

        if processors and not self.dry_run:
          # Produce a monolithic apt processor service info file for further compilation rounds
          # and the unit test classpath.
          processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
          if os.path.exists(processor_info_file):
            with safe_open(processor_info_file, 'r') as f:
              for processor in f:
                processors.add(processor.strip())
          self.write_processor_info(processor_info_file, processors)

    self.post_process(versioned_targets)
Example #11
    def _get_nailgun_endpoint(self):
        if os.path.exists(self._pidfile):
            with _safe_open(self._pidfile, "r") as pidfile:
                contents = pidfile.read()

                def invalid_pidfile():
                    log.warn("Invalid ng pidfile %s contained: %s" % (self._pidfile, contents))
                    return None

                endpoint = contents.split(":")
                if len(endpoint) != 2:
                    return invalid_pidfile()
                pid, port = endpoint
                try:
                    return int(pid.strip()), int(port.strip())
                except ValueError:
                    return invalid_pidfile()
        elif NailgunTask._find:
            pid_port = NailgunTask._find(self._pidfile)
            if pid_port:
                self.context.log.info("found ng server @ pid:%d port:%d" % pid_port)
                with safe_open(self._pidfile, "w") as pidfile:
                    pidfile.write("%d:%d\n" % pid_port)
            return pid_port
        return None
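
The pidfile format used here is simply 'pid:port' on one line; a round-trip sketch of the convention (helper names are hypothetical):

def write_endpoint(pidfile_path, pid, port):
  with open(pidfile_path, 'w') as f:
    f.write('%d:%d\n' % (pid, port))

def read_endpoint(pidfile_path):
  with open(pidfile_path) as f:
    pid, port = f.read().strip().split(':')
  return int(pid), int(port)
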
Example #12
  def write(self, target, path, confs=None):
    def as_jar(internal_target):
      jar, _, _, _ = self.get_db(internal_target).as_jar_with_version(internal_target)
      return jar

    # TODO(John Sirois): a dict is used here to de-dup codegen targets which have both the original
    # codegen target - say java_thrift_library - and the synthetic generated target (java_library)
    # Consider reworking codegen tasks to add removal of the original codegen targets when rewriting
    # the graph
    dependencies = OrderedDict()
    internal_codegen = {}
    for dep in target_internal_dependencies(target):
      jar = as_jar(dep)
      dependencies[(jar.org, jar.name)] = self.internaldep(jar, dep)
      if dep.is_codegen:
        internal_codegen[jar.name] = jar.name
    for jar in target.jar_dependencies:
      if jar.rev:
        dependencies[(jar.org, jar.name)] = self.jardep(jar)
    target_jar = self.internaldep(as_jar(target)).extend(dependencies=dependencies.values())

    template_kwargs = self.templateargs(target_jar, confs)
    with safe_open(path, 'w') as output:
      template = pkgutil.get_data(__name__, self.template_relpath)
      Generator(template, **template_kwargs).write(output)
Example #13
 def from_file(filename, **kw):
   try:
     with safe_open(filename) as fp:
       task = Task.json_load(fp)
     return ThermosTaskWrapper(task, **kw)
   except Exception:
     return None
Example #14
  def _await_nailgun_server(self, stdout, stderr):
    nailgun_timeout_seconds = 5
    max_socket_connect_attempts = 10
    nailgun = None
    port_parse_start = time.time()
    with safe_open(self._ng_out, 'r') as ng_out:
      while not nailgun:
        started = ng_out.readline()
        if started:
          port = self._parse_nailgun_port(started)
          nailgun = self._create_ngclient(port, stdout, stderr)
          log.debug('Detected ng server up on port %d' % port)
        elif time.time() - port_parse_start > nailgun_timeout_seconds:
          raise NailgunClient.NailgunError('Failed to read ng output after'
                                           ' %s seconds' % nailgun_timeout_seconds)

    attempt = 0
    while nailgun:
      sock = nailgun.try_connect()
      if sock:
        sock.close()
        endpoint = self._get_nailgun_endpoint()
        if endpoint:
          log.debug('Connected to ng server with fingerprint %s pid: %d @ port: %d' % endpoint)
        else:
          raise NailgunClient.NailgunError('Failed to connect to ng server.')
        return nailgun
      elif attempt > max_socket_connect_attempts:
        raise nailgun.NailgunError('Failed to connect to ng server after %d connect attempts'
                                   % max_socket_connect_attempts)
      attempt += 1
      log.debug('Failed to connect on attempt %d' % attempt)
      time.sleep(0.1)
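
try_connect above presumably boils down to a plain TCP connect against the parsed port; a minimal sketch of such a probe, assuming the server listens on localhost:

import socket

def try_connect(port, host='localhost', timeout=1.0):
  """Returns a connected socket, or None if the server is not up yet."""
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  sock.settimeout(timeout)
  try:
    sock.connect((host, port))
    return sock
  except socket.error:
    sock.close()
    return None
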
Example #15
          def generate_reports():
            args = [
              'report',
              '-in', self.coverage_metadata_file,
              '-in', self.coverage_file,
              '-exit'
            ]
            source_bases = set(t.target_base for t in targets)
            for source_base in source_bases:
              args.extend(['-sp', source_base])

            sorting = ['-Dreport.sort', '+name,+class,+method,+block']
            if self.coverage_report_console:
              args.extend(['-r', 'txt',
                           '-Dreport.txt.out.file=%s' % self.coverage_console_file] + sorting)
            if self.coverage_report_xml:
              args.extend(['-r', 'xml', '-Dreport.xml.out.file=%s' % self.coverage_xml_file])
            if self.coverage_report_html:
              args.extend(['-r', 'html',
                           '-Dreport.html.out.file=%s' % self.coverage_html_file,
                           '-Dreport.out.encoding=UTF-8'] + sorting)

            result = runjava(
              classpath=emma_classpath,
              main='emma',
              args=args
            )
            if result != 0:
              raise TaskError('Failed to generate emma code coverage reports: %d' % result)

            if self.coverage_report_console:
              with safe_open(self.coverage_console_file) as console_report:
                sys.stdout.write(console_report.read())
            if self.coverage_report_html_open:
              binary_utils.open(self.coverage_html_file)
Example #16
    def execute(self, targets):
        try:
            wiki = Confluence.login(self.url)
        except ConfluenceError as e:
            raise TaskError("Failed to login to confluence: %s" % e)

        urls = list()

        genmap = self.context.products.get("markdown_html")
        for page in filter(lambda t: isinstance(t, Page), targets):
            wikiconfig = page.wiki_config(self.wiki())
            if wikiconfig:
                html_info = genmap.get((self.wiki(), page))
                if len(html_info) > 1:
                    raise TaskError("Unexpected resources for %s: %s" % (page, html_info))
                basedir, htmls = html_info.items()[0]
                if len(htmls) != 1:
                    raise TaskError("Unexpected resources for %s: %s" % (page, htmls))
                with safe_open(os.path.join(basedir, htmls[0])) as contents:
                    url = self.publish_page(
                        wiki, wikiconfig["space"], wikiconfig["title"], contents.read(), parent=wikiconfig.get("parent")
                    )
                    if url:
                        urls.append(url)
                        self.context.log.info("Published %s to %s" % (page, url))

        if self.open and urls:
            binary_utils.open(*urls)
Example #17
def select_binary(base_path, version, name, config=None):
  """Selects a binary matching the current os and architecture.

  :raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no binary of the given version
    and name could be found.
  """
  # TODO(John Sirois): finish doc of the path structure expected under base_path
  config = config or Config.load()
  bootstrap_dir = config.getdefault('pants_bootstrapdir')

  binary_path = select_binary_base_path(base_path, version, name)
  bootstrapped_binary_path = os.path.join(bootstrap_dir, binary_path)
  if not os.path.exists(bootstrapped_binary_path):
    downloadpath = bootstrapped_binary_path + '~'
    try:
      with select_binary_stream(base_path, version, name, config) as stream:
        with safe_open(downloadpath, 'wb') as bootstrapped_binary:
          bootstrapped_binary.write(stream())
        os.rename(downloadpath, bootstrapped_binary_path)
        chmod_plus_x(bootstrapped_binary_path)
    finally:
      safe_delete(downloadpath)

  log.debug('Selected {binary} binary bootstrapped to: {path}'
            .format(binary=name, path=bootstrapped_binary_path))
  return bootstrapped_binary_path
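
Note the download shape shared with Example #48 below: write to a sibling path with a '~' suffix, os.rename into place on success, and always delete the temporary on the way out, so a partial download never occupies the final path. A generic sketch of that pattern (atomic_write is a hypothetical name):

import os

def atomic_write(path, data):
  tmp = path + '~'  # same sibling-path convention as the example above
  try:
    with open(tmp, 'wb') as f:
      f.write(data)
    os.rename(tmp, path)  # atomic on POSIX within one filesystem
  finally:
    if os.path.exists(tmp):
      os.remove(tmp)
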
Example #18
  def execute(self, targets):
    java_targets = filter(_is_java, targets)
    if java_targets:
      safe_mkdir(self._classes_dir)
      safe_mkdir(self._depfile_dir)

      egroups = self.context.products.get_data('exclusives_groups')
      group_id = egroups.get_group_key_for_target(java_targets[0])
      for conf in self._confs:
        egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])
        egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])


      with self.invalidated(java_targets, invalidate_dependents=True,
                            partition_size_hint=self._partition_size_hint) as invalidation_check:
        for vt in invalidation_check.invalid_vts_partitioned:
          # Compile, using partitions for efficiency.
          exclusives_classpath = egroups.get_classpath_for_group(group_id)
          self.execute_single_compilation(vt, exclusives_classpath)
          if not self.dry_run:
            vt.update()

        for vt in invalidation_check.all_vts:
          depfile = self.create_depfile_path(vt.targets)
          if not self.dry_run and os.path.exists(depfile):
            # Read in the deps created either just now or by a previous run on these targets.
            deps = Dependencies(self._classes_dir)
            deps.load(depfile)
            self._deps.merge(deps)

      if not self.dry_run:
        if self.context.products.isrequired('classes'):
          genmap = self.context.products.get('classes')
          # Map generated classes to the owning targets and sources.
          for target, classes_by_source in self._deps.findclasses(java_targets).items():
            for source, classes in classes_by_source.items():
              genmap.add(source, self._classes_dir, classes)
              genmap.add(target, self._classes_dir, classes)

          # TODO(John Sirois): Map target.resources in the same way
          # 'Map' (rewrite) annotation processor service info files to the owning targets.
          for target in java_targets:
            if is_apt(target) and target.processors:
              basedir = os.path.join(self._resources_dir, Target.maybe_readable_identify([target]))
              processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
              self.write_processor_info(processor_info_file, target.processors)
              genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])

        # Produce a monolithic apt processor service info file for further compilation rounds
        # and the unit test classpath.
        all_processors = set()
        for target in java_targets:
          if is_apt(target) and target.processors:
            all_processors.update(target.processors)
        processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
        if os.path.exists(processor_info_file):
          with safe_open(processor_info_file, 'r') as f:
            for processor in f:
              all_processors.add(processor.strip())
        self.write_processor_info(processor_info_file, all_processors)
Example #19
  def execute(self, targets):
    pages = []
    for target in targets:
      if isinstance(target, Page):
        wikiconfig = target.wiki_config(self.wiki())
        if wikiconfig:
          pages.append((target, wikiconfig))

    urls = list()

    genmap = self.context.products.get('wiki_html')
    for page, wikiconfig in pages:
      html_info = genmap.get((self.wiki(), page))
      if len(html_info) > 1:
        raise TaskError('Unexpected resources for %s: %s' % (page, html_info))
      basedir, htmls = html_info.items()[0]
      if len(htmls) != 1:
        raise TaskError('Unexpected resources for %s: %s' % (page, htmls))
      with safe_open(os.path.join(basedir, htmls[0])) as contents:
        url = self.publish_page(
          page.address,
          wikiconfig['space'],
          wikiconfig['title'],
          contents.read(),
          parent=wikiconfig.get('parent')
        )
        if url:
          urls.append(url)
          self.context.log.info('Published %s to %s' % (page, url))

    if self.open and urls:
      binary_util.ui_open(*urls)
Example #20
  def checkstyle(self, sources, targets):
    egroups = self.context.products.get_data('exclusives_groups')
    etag = egroups.get_group_key_for_target(targets[0])
    classpath = self._jvm_tool_bootstrapper.get_jvm_tool_classpath(self._checkstyle_bootstrap_key)
    cp = egroups.get_classpath_for_group(etag)
    classpath.extend(jar for conf, jar in cp if conf in self._confs)

    args = [
      '-c', self._configuration_file,
      '-f', 'plain'
    ]

    if self._properties:
      properties_file = os.path.join(self._work_dir, 'checkstyle.properties')
      with safe_open(properties_file, 'w') as pf:
        for k, v in self._properties.items():
          pf.write('%s=%s\n' % (k, v))
      args.extend(['-p', properties_file])

    # We've hit known cases of checkstyle command lines being too long for the system, so we guard
    # with Xargs, since checkstyle does not accept, for example, @argfile-style arguments.
    def call(xargs):
      return self.runjava(classpath=classpath, main=CHECKSTYLE_MAIN,
                          args=args + xargs, workunit_name='checkstyle')
    checks = Xargs(call)

    return checks.execute(sources)
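
Xargs here is pants' guard against over-long command lines: it invokes the wrapped command over batches of arguments instead of all at once. A rough sketch of the idea with a fixed batch size (the real helper deals with the actual OS limit; this hard-codes one for illustration):

class XargsSketch(object):
  """Runs cmd_fn over args in batches, stopping at the first failure."""

  def __init__(self, cmd_fn, batch_size=500):
    self._cmd_fn = cmd_fn
    self._batch_size = batch_size

  def execute(self, args):
    for i in range(0, len(args), self._batch_size):
      result = self._cmd_fn(args[i:i + self._batch_size])
      if result != 0:
        return result
    return 0
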
Example #21
 def from_file(cls, filename, **kw):
   try:
     with safe_open(filename) as fp:
       task = Task.json_load(fp)
     return cls(task, **kw)
   except Exception:
     return None
Example #22
    def stage_artifacts(target, jar, version, changelog, confs=None):
      def artifact_path(name=None, suffix='', extension='jar'):
        return os.path.join(self.outdir, jar.org, jar.name,
                            '%s-%s%s.%s' % ((name or jar.name), version, suffix, extension))

      with safe_open(artifact_path(suffix='-CHANGELOG', extension='txt'), 'w') as changelog_file:
        changelog_file.write(changelog)

      def get_pushdb(target):
        return get_db(target)[0]

      PomWriter(get_pushdb).write(target, artifact_path(extension='pom'))

      ivyxml = artifact_path(name='ivy', extension='xml')
      IvyWriter(get_pushdb).write(target, ivyxml, confs)

      def copy(typename, suffix=''):
        genmap = self.context.products.get(typename)
        for basedir, jars in genmap.get(target).items():
          for artifact in jars:
            shutil.copy(os.path.join(basedir, artifact), artifact_path(suffix=suffix))

      copy('jars')
      if is_java(target):
        copy('javadoc_jars', '-javadoc')
      copy('source_jars', '-sources')

      return ivyxml
Example #23
 def _get_file(self, name):
   if self._mode == LoggerMode.STANDARD:
     return safe_open(self._get_log_path(name), mode='a')
   if self._mode == LoggerMode.ROTATE:
     log_size = int(self._rotate_log_size.as_(Data.BYTES))
     return RotatingFileHandler(self._get_log_path(name),
                                log_size,
                                self._rotate_log_backups)
Example #24
  def execute(self, targets):
    if not self._flatten and len(targets) > 1:
      topologically_sorted_targets = filter(JavaCompile._is_java, reversed(InternalTarget.sort_targets(targets)))
      for target in topologically_sorted_targets:
        self.execute([target])
      return

    self.context.log.info('Compiling targets %s' % str(targets))

    java_targets = filter(JavaCompile._is_java, targets)
    if java_targets:
      with self.context.state('classpath', []) as cp:
        for conf in self._confs:
          cp.insert(0, (conf, self._resources_dir))
          cp.insert(0, (conf, self._classes_dir))

        with self.changed(java_targets, invalidate_dependants=True) as changed:
          sources_by_target, processors, fingerprint = self.calculate_sources(changed)
          if sources_by_target:
            sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
            if not sources:
              self.context.log.warn('Skipping java compile for targets with no sources:\n  %s' %
                                    '\n  '.join(str(t) for t in sources_by_target.keys()))
            else:
              classpath = [jar for conf, jar in cp if conf in self._confs]
              result = self.compile(classpath, sources, fingerprint)
              if result != 0:
                default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
                raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))

            if processors:
              # Produce a monolithic apt processor service info file for further compilation rounds
              # and the unit test classpath.
              processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
              if os.path.exists(processor_info_file):
                with safe_open(processor_info_file, 'r') as f:
                  for processor in f:
                    processors.add(processor.strip())
              self.write_processor_info(processor_info_file, processors)

      if self.context.products.isrequired('classes'):
        genmap = self.context.products.get('classes')

        # Map generated classes to the owning targets and sources.
        dependencies = Dependencies(self._classes_dir, self._dependencies_file)
        for target, classes_by_source in dependencies.findclasses(targets).items():
          for source, classes in classes_by_source.items():
            genmap.add(source, self._classes_dir, classes)
            genmap.add(target, self._classes_dir, classes)

        # TODO(John Sirois): Map target.resources in the same way
        # 'Map' (rewrite) annotation processor service info files to the owning targets.
        for target in targets:
          if is_apt(target) and target.processors:
            basedir = os.path.join(self._resources_dir, target.id)
            processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
            self.write_processor_info(processor_info_file, target.processors)
            genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])
Example #25
File: fetcher.py Project: aoen/pants
 def download_fp(_path_or_fd):
   if _path_or_fd and not isinstance(_path_or_fd, Compatibility.string):
     yield _path_or_fd, _path_or_fd.name
   else:
     if not _path_or_fd:
       fd, _path_or_fd = tempfile.mkstemp()
       os.close(fd)
     with safe_open(_path_or_fd, 'w') as fp:
       yield fp, _path_or_fd
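
This generator has context-manager shape (it yields exactly once), so it is presumably wrapped with contextlib.contextmanager at its definition site. Hypothetical usage under that assumption:

fetched_text = 'payload'  # stand-in for a downloaded body
with download_fp(None) as (fp, path):
  fp.write(fetched_text)
# path now names a temporary file holding fetched_text
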
Example #26
  def _do_in_context(self, work, path=None):
    # TODO(John Sirois): eliminate the need for all the gymnastics needed to synthesize a target
    build_dir = path or self.config.getdefault('pants_workdir')
    build_path = os.path.join(build_dir, 'BUILD.pants')
    if not os.path.exists(build_path):
      with safe_open(build_path, 'w') as build_file:
        build_file.write('# dummy BUILD file generated by pants\n')

    return ParseContext(BuildFile(get_buildroot(), build_path)).do_in_context(work)
Example #27
  def create_file(cls, relpath, contents='', mode='w'):
    """Writes to a file under the buildroot.

    relpath:  The relative path to the file from the build root.
    contents: A string containing the contents of the file - '' by default.
    mode:     The mode to write to the file in - over-write by default.
    """
    with safe_open(os.path.join(cls.build_root, relpath), mode=mode) as fp:
      fp.write(contents)
Example #28
  def write(self, target, path):
    dependencies = [self.internaldep(dep) for dep in target.internal_dependencies]
    dependencies.extend(PomWriter.jardep(dep) for dep in target.jar_dependencies if dep.rev)
    target_jar = self.internaldep(target).extend(dependencies=dependencies)

    with safe_open(path, 'w') as output:
      generator = Generator(pkgutil.get_data(__name__, os.path.join('jar_publish', 'pom.mk')),
                            artifact=target_jar)
      generator.write(output)
Example #29
File: scrooge_gen.py Project: aoen/pants
 def write_gen_file_map_for_target(self, gen_file_map, target, outdir):
   def calc_srcs(target):
     _, srcs = calculate_compile_sources([target], self.is_gentarget)
     return srcs
   with safe_open(self.gen_file_map_path_for_target(target, outdir), 'w') as f:
     for src in sorted(calc_srcs(target)):
       clss = gen_file_map[src]
       for cls in sorted(clss):
         print('%s -> %s' % (src, os.path.join(outdir, cls)), file=f)
Example #30
 def write_plugin_info(self, target):
   basedir = os.path.join(self._resources_dir, target.id)
   with safe_open(os.path.join(basedir, _PLUGIN_INFO_FILE), 'w') as f:
     f.write(textwrap.dedent('''
       <plugin>
         <name>%s</name>
         <classname>%s</classname>
       </plugin>
     ''' % (target.plugin, target.classname)).strip())
   return basedir
Example #31
def safe_args(args,
              max_args=None,
              config=None,
              argfile=None,
              delimiter='\n',
              quoter=None,
              delete=True):
    """
    Yields args if there are less than a limit otherwise writes args to an argfile and yields an
    argument list with one argument formed from the path of the argfile.

    :args The args to work with.
    :max_args The maximum number of args to let though without writing an argfile.  If not specified
              then the maximum will be loaded from config.
    :config Used to lookup the configured maximum number of args that can be passed to a subprocess;
            defaults to the default config and looks for key 'max_subprocess_args' in the DEFAULTS.
    :argfile The file to write args to when there are too many; defaults to a temporary file.
    :delimiter The delimiter to insert between args written to the argfile, defaults to '\n'
    :quoter A function that can take the argfile path and return a single argument value;
            defaults to:
            <code>lambda f: '@' + f<code>
    :delete If True deletes any arg files created upon exit from this context; defaults to True.
  """
    max_args = max_args or (config or Config.load()).getdefault(
        'max_subprocess_args', int, 10)
    if len(args) > max_args:

        def create_argfile(fp):
            fp.write(delimiter.join(args))
            fp.close()
            return [quoter(fp.name) if quoter else '@%s' % fp.name]

        if argfile:
            try:
                with safe_open(argfile, 'w') as fp:
                    yield create_argfile(fp)
            finally:
                if delete and os.path.exists(argfile):
                    os.unlink(argfile)
        else:
            with temporary_file(cleanup=delete) as fp:
                yield create_argfile(fp)
    else:
        yield args
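
safe_args itself is a context manager (note the yield statements); hypothetical usage, assuming the module wraps it with contextlib.contextmanager ('sources' and 'some-tool' are stand-ins):

import subprocess

sources = ['a.java', 'b.java', 'c.java']
with safe_args(sources, max_args=2) as all_args:
  # all_args is either the original list or ['@/path/to/argfile']
  subprocess.call(['some-tool'] + list(all_args))
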
Example #32
 def run_tests(classpath, main, jvmargs=None):
     if self.only_write_cmd_line is None:
         with safe_args(tests) as all_tests:
             result = runjava(
                 jvmargs=(jvmargs or []) + self.java_args,
                 classpath=classpath,
                 main=main,
                 args=self.flags + all_tests,
             )
     else:
         with safe_open(self.only_write_cmd_line, 'w') as fd:
             result = runjava(jvmargs=(jvmargs or []) +
                              self.java_args,
                              classpath=classpath,
                              main=main,
                              args=self.flags + tests,
                              only_write_cmd_line_to=fd)
     if result != 0:
         raise TaskError()
Example #33
    def _get_handler(self, name):
        """
    Constructs correct handler or file object based on the provided configuration.
    """

        # With no destination, write logs to /dev/null
        if self._destination == LoggerDestination.NONE:
            return StreamHandler(safe_open(os.devnull, 'w'))

        # Streamed logs to predefined outputs
        if self._destination == LoggerDestination.CONSOLE:
            return sys.stdout if name == self.STDOUT else sys.stderr

        # Streaming AND file logs are required
        if self._destination == LoggerDestination.BOTH:
            return TeeHandler(self._get_stream(name), self._get_file(name))

        # File only logs are required
        return self._get_file(name)
Example #34
File: util.py Project: xianxu/pants
 def maybe_locally_cache(dist, cache_dir):
     from pkg_resources import PathMetadata, Distribution
     from twitter.common.dirutil import safe_rmtree, safe_open, safe_mkdir
     egg_name = os.path.join(cache_dir, dist.egg_name() + '.egg')
     safe_mkdir(cache_dir)
     if not os.path.exists(egg_name):
         egg_tmp_path = tempfile.mkdtemp(dir=cache_dir,
                                         prefix=dist.egg_name())
         for fn, content in DistributionHelper.walk(dist):
             with safe_open(os.path.join(egg_tmp_path, fn), 'wb') as fp:
                 fp.write(content)
         try:
             os.rename(egg_tmp_path, egg_name)
         except OSError as e:
             # Handle the race condition of other people trying to write into the target cache.
             if e.errno == errno.ENOTEMPTY:
                 safe_rmtree(egg_tmp_path)
     metadata = PathMetadata(egg_name, os.path.join(egg_name, 'EGG-INFO'))
     return Distribution.from_filename(egg_name, metadata=metadata)
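
The cache population is race-tolerant in the same spirit as Example #1: build the egg in a mkdtemp sibling, os.rename it into place, and treat ENOTEMPTY as "somebody else won". A generic sketch of that shape (names are hypothetical):

import errno
import os
import shutil
import tempfile

def atomic_populate_dir(final_dir, fill):
  """fill(tmp_dir) writes the content; the rename publishes it atomically."""
  tmp_dir = tempfile.mkdtemp(dir=os.path.dirname(final_dir))
  fill(tmp_dir)
  try:
    os.rename(tmp_dir, final_dir)
  except OSError as e:
    if e.errno == errno.ENOTEMPTY:  # another process won the race
      shutil.rmtree(tmp_dir)
    else:
      raise
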
Example #35
File: jvm_run.py Project: jalons/commons
  def execute(self, targets):
    # The called binary may block for a while, allow concurrent pants activity during this pants
    # idle period.
    #
    # TODO(John Sirois): refactor lock so that I can do:
    # with self.context.lock.yield():
    #   - blocking code
    #
    # Currently re-acquiring the lock requires a path argument that was set up by the goal
    # execution engine.  I do not want task code to learn the lock location.
    # http://jira.local.twitter.com/browse/AWESOME-1317

    self.context.lock.release()
    # Run the first target that is a binary.
    binaries = filter(is_binary, targets)
    if len(binaries) > 0:  # We only run the first one.
      main = binaries[0].main

      # TODO(John Sirois): Since --dry-run is plumbed throughout the Task infra it seems like we
      # should just be using that.  Ask Benjy why this particular task uses a custom file.
      def run_binary(dryrun=False):
        def run_workunit_factory(name, labels=list(), cmd=''):
          return self.context.new_workunit(name=name, labels=[WorkUnit.RUN] + labels, cmd=cmd)

        result = runjava_indivisible(
          jvmargs=self.jvm_args,
          classpath=(self.classpath(confs=self.confs)),
          main=main,
          args=self.args,
          dryrun=dryrun,
          workunit_factory=run_workunit_factory,
          workunit_name='run'
        )
        if dryrun:
          return result
        if result != 0:
          raise TaskError()

      result = run_binary(dryrun=self.only_write_cmd_line)
      if self.only_write_cmd_line:
        with safe_open(self.only_write_cmd_line, 'w') as fd:
          fd.write(result)
Example #36
    def __init__(self, filename, max_bytes, max_backups, mode='w'):
        """
      required:
        filename    = The file name.
        max_bytes   = The maximum size of an individual log file.
        max_backups = The maximum number of log file backups to create.

      optional:
        mode = Mode to open the file in.
    """
        if max_bytes > 0 and max_backups <= 0:
            raise ValueError(
                'A positive value for max_backups must be specified if max_bytes > 0.'
            )
        self._max_bytes = max_bytes
        self._max_backups = max_backups
        self.file = safe_open(filename, mode=mode)
        self.filename = filename
        self.mode = mode
        self.closed = False
Example #37
    def checkstyle(self, sources):
        classpath = self.profile_classpath(self._profile)
        with self.context.state('classpath', []) as cp:
            classpath.extend(jar for conf, jar in cp if conf in self._confs)

        opts = ['-c', self._configuration_file, '-f', 'plain']

        if self._properties:
            properties_file = os.path.join(self._work_dir,
                                           'checkstyle.properties')
            with safe_open(properties_file, 'w') as pf:
                for k, v in self._properties.items():
                    pf.write('%s=%s\n' % (k, v))
            opts.extend(['-p', properties_file])

        return self.runjava(CHECKSTYLE_MAIN,
                            classpath=classpath,
                            opts=opts,
                            args=sources,
                            workunit_name='checkstyle')
Example #38
  def report(self, targets, tests, junit_classpath):
    emma_classpath = self._task_exports.tool_classpath(self._emma_bootstrap_key)
    args = [
      'report',
      '-in', self._coverage_metadata_file,
      '-in', self._coverage_file,
      '-exit'
      ]
    source_bases = set()
    def collect_source_base(target):
      if self.is_coverage_target(target):
        source_bases.add(target.target_base)
    for target in self._test_target_candidates(targets):
      target.walk(collect_source_base)
    for source_base in source_bases:
      args.extend(['-sp', source_base])

    sorting = ['-Dreport.sort', '+name,+class,+method,+block']
    if self._coverage_report_console:
      args.extend(['-r', 'txt',
                   '-Dreport.txt.out.file=%s' % self._coverage_console_file] + sorting)
    if self._coverage_report_xml:
      args.extend(['-r', 'xml', '-Dreport.xml.out.file=%s' % self._coverage_xml_file])
    if self._coverage_report_html:
      args.extend(['-r', 'html',
                   '-Dreport.html.out.file=%s' % self._coverage_html_file,
                   '-Dreport.out.encoding=UTF-8'] + sorting)

    main = 'emma'
    result = execute_java(classpath=emma_classpath, main=main, args=args,
                          workunit_factory=self._context.new_workunit,
                          workunit_name='emma-report')
    if result != 0:
      raise TaskError("java %s ... exited non-zero (%i)"
                      " 'failed to generate code coverage reports'" % (main, result))

    if self._coverage_report_console:
      with safe_open(self._coverage_console_file) as console_report:
        sys.stdout.write(console_report.read())
    if self._coverage_report_html_open:
      binary_util.ui_open(self._coverage_html_file)
Example #39
def find_java_home():
    # A kind-of-insane hack to find the effective java home. On some platforms there are so
    # many hard and symbolic links into the JRE dirs that it's actually quite hard to
    # establish what path to use as the java home, e.g., for the purpose of rebasing.
    # In practice, this seems to work fine.
    #
    # TODO: In the future we should probably hermeticize the Java environment rather than relying
    # on whatever's on the shell's PATH. E.g., you either specify a path to the Java home via a
    # cmd-line flag or .pantsrc, or we infer one with this method but verify that it's of a
    # supported version.
    with temporary_dir() as tmpdir:
        with safe_open(os.path.join(tmpdir, 'X.java'), 'w') as srcfile:
            srcfile.write(
                'class X { public static void main(String[] argv) { '
                'System.out.println(System.getProperty("java.home")); } }')
        subprocess.Popen(['javac', '-d', tmpdir, srcfile.name],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE).communicate()
        return subprocess.Popen(['java', '-cp', tmpdir, 'X'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE).communicate()[0]
Example #40
  def checkstyle(self, sources, targets):
    egroups = self.context.products.get_data('exclusives_groups')
    etag = egroups.get_group_key_for_target(targets[0])
    classpath = self.profile_classpath(self._profile)
    cp = egroups.get_classpath_for_group(etag)
    classpath.extend(jar for conf, jar in cp if conf in self._confs)

    opts = [
      '-c', self._configuration_file,
      '-f', 'plain'
    ]

    if self._properties:
      properties_file = os.path.join(self._work_dir, 'checkstyle.properties')
      with safe_open(properties_file, 'w') as pf:
        for k, v in self._properties.items():
          pf.write('%s=%s\n' % (k, v))
      opts.extend(['-p', properties_file])

    return self.runjava(CHECKSTYLE_MAIN, classpath=classpath, opts=opts, args=sources,
                        workunit_name='checkstyle')
Example #41
    def execute(self, targets):
        # Run the first target that is a binary.
        self.context.lock.release()
        binaries = filter(is_binary, targets)
        if len(binaries) > 0:  # We only run the first one.
            main = binaries[0].main

            def run_binary(only_write_cmd_line_to):
                result = runjava(jvmargs=self.jvm_args,
                                 classpath=(self.classpath(confs=self.confs)),
                                 main=main,
                                 args=self.args,
                                 only_write_cmd_line_to=only_write_cmd_line_to)
                if result != 0:
                    raise TaskError()

            if self.only_write_cmd_line is None:
                run_binary(None)
            else:
                with safe_open(self.only_write_cmd_line, 'w') as fd:
                    run_binary(fd)
Example #42
    def execute(self, targets):
        # The called binary may block for a while, allow concurrent pants activity during this pants
        # idle period.
        #
        # TODO(John Sirois): refactor lock so that I can do:
        # with self.context.lock.yield():
        #   - blocking code
        #
        # Currently re-acquiring the lock requires a path argument that was set up by the goal
        # execution engine.  I do not want task code to learn the lock location.
        # http://jira.local.twitter.com/browse/AWESOME-1317

        self.context.lock.release()
        # Run the first target that is a binary.
        binaries = filter(is_binary, targets)
        if len(binaries) > 0:  # We only run the first one.
            main = binaries[0].main
            egroups = self.context.products.get_data('exclusives_groups')
            group_key = egroups.get_group_key_for_target(binaries[0])
            group_classpath = egroups.get_classpath_for_group(group_key)

            executor = CommandLineGrabber() if self.only_write_cmd_line else None
            result = execute_java(classpath=(self.classpath(
                confs=self.confs, exclusives_classpath=group_classpath)),
                                  main=main,
                                  executor=executor,
                                  jvm_options=self.jvm_args,
                                  args=self.args,
                                  workunit_factory=self.context.new_workunit,
                                  workunit_name='run',
                                  workunit_labels=[WorkUnit.RUN])

            if self.only_write_cmd_line:
                with safe_open(self.only_write_cmd_line, 'w') as outfile:
                    outfile.write(' '.join(executor.cmd))
            elif result != 0:
                raise TaskError('java %s ... exited non-zero (%i)' %
                                (main, result),
                                exit_code=result)
Example #43
  def upload_sync(self, stats):
    try:
      last_modified = self.collect_host_env_info(stats)
      if not last_modified:
        last_modified = int(os.path.getmtime(self._pants_stat_file))

      with safe_open(self._pants_stat_file, 'r') as stats_file:
        lines = stats_file.readlines()
      # We do not only wait for MAX_RECORDS; we also upload when the time since the last
      # upload exceeds the value configured in pants.ini.
      last_uploaded = Amount(int(time.time()) - last_modified, Time.SECONDS)
      if (self.force_stats_upload or len(lines) >= MAX_RECORDS or last_uploaded > self._max_delay):
        # Put the file in the right place.
        dirutil.safe_mkdir(self._stats_dir)
        with temporary_file(self._stats_dir, False) as stats_uploader_tmpfile:
          os.rename(self._pants_stat_file, stats_uploader_tmpfile.name)
        self._stats_http_client.process_stats_file()
      # Merge logs so that the user's /tmp is not cluttered with too many log files per run.
      self.merge_logs()
      sys.exit(0)
    except OSError as e:
      log.debug("Error manipulating stats files for upload %s" % e)
Example #44
    def _await_nailgun_server(self, stdout, stderr):
        nailgun_timeout_seconds = 5
        max_socket_connect_attempts = 10
        nailgun = None
        port_parse_start = time.time()
        with safe_open(self._ng_out, 'r') as ng_out:
            while not nailgun:
                started = ng_out.readline()
                if started:
                    port = self._parse_nailgun_port(started)
                    nailgun = self._create_ngclient(port, stdout, stderr)
                    log.debug('Detected ng server up on port %d' % port)
                elif time.time() - port_parse_start > nailgun_timeout_seconds:
                    raise NailgunClient.NailgunError(
                        'Failed to read ng output after'
                        ' %s seconds' % nailgun_timeout_seconds)

        attempt = 0
        while nailgun:
            sock = nailgun.try_connect()
            if sock:
                sock.close()
                endpoint = self._get_nailgun_endpoint()
                if endpoint:
                    log.debug(
                        'Connected to ng server with fingerprint %s pid: %d @ port: %d'
                        % endpoint)
                else:
                    raise NailgunClient.NailgunError(
                        'Failed to connect to ng server.')
                return nailgun
            elif attempt > max_socket_connect_attempts:
                raise nailgun.NailgunError(
                    'Failed to connect to ng server after %d connect attempts'
                    % max_socket_connect_attempts)
            attempt += 1
            log.debug('Failed to connect on attempt %d' % attempt)
            time.sleep(0.1)
Example #45
        def stage_artifact(tgt,
                           jar,
                           version,
                           changelog,
                           confs=None,
                           artifact_ext=''):
            def path(name=None, suffix='', extension='jar'):
                return artifact_path(jar,
                                     version,
                                     name=name,
                                     suffix=suffix,
                                     extension=extension,
                                     artifact_ext=artifact_ext)

            with safe_open(path(suffix='-CHANGELOG', extension='txt'),
                           'w') as changelog_file:
                changelog_file.write(changelog)
            ivyxml = path(name='ivy', extension='xml')

            IvyWriter(get_pushdb).write(tgt, ivyxml, confs=confs)
            PomWriter(get_pushdb).write(tgt, path(extension='pom'))

            return ivyxml
Example #46
 def _get_nailgun_endpoint(self):
   if os.path.exists(self._pidfile):
     with _safe_open(self._pidfile, 'r') as pidfile:
       contents = pidfile.read()
       def invalid_pidfile():
         log.warn('Invalid ng pidfile %s contained: %s' % (self._pidfile, contents))
         return None
       endpoint = contents.split(':')
       if len(endpoint) != 2:
         return invalid_pidfile()
       pid, port = endpoint
       try:
         return int(pid.strip()), int(port.strip())
       except ValueError:
         return invalid_pidfile()
   elif NailgunTask._find:
     pid_port = NailgunTask._find(self._pidfile)
     if pid_port:
       self.context.log.info('found ng server @ pid:%d port:%d' % pid_port)
       with safe_open(self._pidfile, 'w') as pidfile:
         pidfile.write('%d:%d\n' % pid_port)
     return pid_port
   return None
Example #47
    def write(self, target, path, confs=None):
        def as_jar(internal_target):
            jar, _, _, _ = self.get_db(internal_target).as_jar_with_version(
                internal_target)
            return jar

        # TODO(John Sirois): a dict is used here to de-dup codegen targets which have both the original
        # codegen target - say java_thrift_library - and the synthetic generated target (java_library)
        # Consider reworking codegen tasks to add removal of the original codegen targets when rewriting
        # the graph
        dependencies = OrderedDict()
        for dep in target.internal_dependencies:
            jar = as_jar(dep)
            dependencies[(jar.org, jar.name)] = self.internaldep(jar)
        for jar in target.jar_dependencies:
            if jar.rev:
                dependencies[(jar.org, jar.name)] = self.jardep(jar)
        target_jar = self.internaldep(
            as_jar(target)).extend(dependencies=dependencies.values())

        template_kwargs = self.templateargs(target_jar, confs)
        with safe_open(path, 'w') as output:
            template = pkgutil.get_data(__name__, self.template_relpath)
            Generator(template, **template_kwargs).write(output)
Example #48
def select_binary(base_path, version, name, config=None):
  """Selects a binary matching the current os and architecture.

  Raises TaskError if no binary of the given version and name could be found.
  """
  # TODO(John Sirois): finish doc of the path structure expected under base_path
  config = config or Config.load()
  cachedir = config.getdefault('pants_cachedir', default=os.path.expanduser('~/.pants.d'))
  baseurl = config.getdefault('pants_support_baseurl')
  timeout_secs = config.getdefault('pants_support_fetch_timeout_secs', type=int, default=30)

  sysname, _, release, _, machine = os.uname()
  os_id = _ID_BY_OS[sysname.lower()]
  if os_id:
    middle_path = _PATH_BY_ID[os_id(release, machine)]
    if middle_path:
      binary_path = os.path.join(base_path, *(middle_path + [version, name]))
      cached_binary_path = os.path.join(cachedir, binary_path)
      if not os.path.exists(cached_binary_path):
        url = posixpath.join(baseurl, binary_path)
        log.info('Fetching %s binary from: %s' % (name, url))
        downloadpath = cached_binary_path + '~'
        try:
          with closing(urllib_request.urlopen(url, timeout=timeout_secs)) as binary:
            with safe_open(downloadpath, 'wb') as cached_binary:
              cached_binary.write(binary.read())

          os.rename(downloadpath, cached_binary_path)
          chmod_plus_x(cached_binary_path)
        except (IOError, urllib_error.HTTPError, urllib_error.URLError) as e:
          raise TaskError('Failed to fetch binary from %s: %s' % (url, e))
        finally:
          safe_delete(downloadpath)
      log.debug('Selected %s binary cached at: %s' % (name, cached_binary_path))
      return cached_binary_path
  raise TaskError('No %s binary found for: %s' % (name, (sysname, release, machine)))
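`_ID_BY_OS` and `_PATH_BY_ID` are referenced above but not included in this excerpt. From the call sites, the first maps a lowercased uname sysname to a callable over `(release, machine)`, and the second maps that callable's result to path segments under `base_path`. A sketch of plausible shapes follows; the concrete entries are assumptions, not code from the project:

_ID_BY_OS = {
  'linux': lambda release, machine: ('linux', machine),
  'darwin': lambda release, machine: ('darwin', release.split('.')[0]),
}

# Illustrative entries only: keys are ids produced by _ID_BY_OS, values are
# the path segments inserted between base_path and version/name.
_PATH_BY_ID = {
  ('linux', 'x86_64'): ['linux', 'x86_64'],
  ('linux', 'i686'): ['linux', 'i386'],
  ('darwin', '13'): ['mac', '10.9'],
}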
Example #49
 def _cachepath(self, file):
     with safe_open(file, 'r') as cp:
         yield (path.strip() for path in cp.read().split(os.pathsep)
                if path.strip())
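The yield-inside-with shape of `_cachepath` implies it is wrapped with `contextlib.contextmanager` at its definition site (the decorator is not visible in this excerpt). Under that assumption, usage would look like the sketch below; note the yielded generator is lazy and only valid while the file is still open:

# 'cachepath_file' is a hypothetical caller-side name.
with self._cachepath(cachepath_file) as paths:
    classpath = list(paths)  # materialize before the with block closes the file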
Example #50
    def test(self):
        self.assertEqual(set(), self.git.changed_files())
        self.assertEqual(set(['README']),
                         self.git.changed_files(from_commit='HEAD^'))

        tip_sha = self.git.commit_id
        self.assertTrue(tip_sha)

        self.assertTrue(tip_sha in self.git.changelog())

        self.assertTrue(self.git.tag_name.startswith('first-'),
                        msg='un-annotated tags should be found')
        self.assertEqual('master', self.git.branch_name)

        def edit_readme():
            with open(self.readme_file, 'a') as readme:
                readme.write('More data.')

        edit_readme()
        with open(os.path.join(self.worktree, 'INSTALL'), 'w') as untracked:
            untracked.write('make install')
        self.assertEqual(set(['README']), self.git.changed_files())
        self.assertEqual(set(['README', 'INSTALL']),
                         self.git.changed_files(include_untracked=True))

        try:
            # These changes should be rejected because our branch point from origin is 1 commit behind
            # the changes pushed there in clone 2.
            self.git.commit('API Changes.')
        except Scm.RemoteException:
            with environment_as(GIT_DIR=self.gitdir,
                                GIT_WORK_TREE=self.worktree):
                subprocess.check_call(
                    ['git', 'reset', '--hard', 'depot/master'])
            self.git.refresh()
            edit_readme()

        self.git.commit('''API '"' " Changes.''')
        self.git.tag('second', message='''Tagged ' " Changes''')

        with temporary_dir() as clone:
            with pushd(clone):
                self.init_repo('origin', self.origin)
                subprocess.check_call(
                    ['git', 'pull', '--tags', 'origin', 'master:master'])

                with open(os.path.realpath('README')) as readme:
                    self.assertEqual('--More data.', readme.read())

                git = Git()

                # Check that we can pick up committed and uncommitted changes.
                with safe_open(os.path.realpath('CHANGES'), 'w') as changes:
                    changes.write('none')
                subprocess.check_call(['git', 'add', 'CHANGES'])
                self.assertEqual(set(['README', 'CHANGES']),
                                 git.changed_files(from_commit='first'))

                self.assertEqual('master', git.branch_name)
                self.assertEqual('second',
                                 git.tag_name,
                                 msg='annotated tags should be found')
Example #51
 def emit_codehighlight_css(path, style):
   with safe_open(path, 'w') as css:
     css.write(HtmlFormatter(style=style).get_style_defs('.codehilite'))
   return path
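`HtmlFormatter` is Pygments' HTML formatter, and `get_style_defs('.codehilite')` returns CSS rules scoped to the `.codehilite` class that the codehilite Markdown extension attaches to highlighted blocks. A minimal usage sketch (the path and style name are illustrative):

from pygments.formatters import HtmlFormatter

css_path = emit_codehighlight_css('/tmp/codehighlight.css', 'friendly')
# The returned path can then feed the css parameter of the Markdown
# processing in Example #52 below.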
Example #52
  def process(self, outdir, base, source, fragmented, url_builder, get_config, css=None):
    def parse_url(spec):
      match = MarkdownToHtml.PANTS_LINK.match(spec)
      if match:
        page = Target.get(Address.parse(get_buildroot(), match.group(1)))
        anchor = match.group(2) or ''
        if not page:
          raise TaskError('Invalid link %s' % match.group(1))
        alias, url = url_builder(page, config=get_config(page))
        return alias, url + anchor
      else:
        return spec, spec

    def build_url(label):
      components = label.split('|', 1)
      if len(components) == 1:
        return parse_url(label.strip())
      else:
        alias, link = components
        _, url = parse_url(link.strip())
        return alias, url

    wikilinks = WikilinksExtension(build_url)

    path, ext = os.path.splitext(source)
    output_path = os.path.join(outdir, path + '.html')
    safe_mkdir(os.path.dirname(output_path))
    with codecs.open(output_path, 'w', 'utf-8') as output:
      with codecs.open(os.path.join(get_buildroot(), base, source), 'r', 'utf-8') as input:
        md_html = markdown.markdown(
          input.read(),
          extensions=['codehilite(guess_lang=False)', 'extra', 'tables', 'toc', wikilinks],
        )
        if fragmented:
          if css:
            with safe_open(css) as fd:
              output.write(textwrap.dedent('''
              <style type="text/css">
              %s
              </style>
              ''').strip() % fd.read())
              output.write('\n')
          output.write(md_html)
        else:
          if css:
            css_relpath = os.path.relpath(css, outdir)
            out_relpath = os.path.dirname(source)
            link_relpath = os.path.relpath(css_relpath, out_relpath)
            css = '<link rel="stylesheet" type="text/css" href="%s"/>' % link_relpath
          html = textwrap.dedent('''
          <html>
            <head>
              <meta charset="utf-8">
              %s
            </head>
            <body>
          <!-- generated by pants! -->
          %s
            </body>
          </html>
          ''').strip() % (css or '', md_html)
          output.write(html)
        return output.name
Example #53
 def _write_processor_info(self, processor_info_file, processors):
     with safe_open(processor_info_file, 'w') as f:
         for processor in processors:
             f.write('%s\n' % processor.strip())
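The one-class-name-per-line format written here is the same format Example #56 below reads back with `safe_open(processor_info_file, 'r')` and merges before rewriting.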
Example #54
 def write(self, path, contents):
   with safe_open(path, 'w') as fp:
     fp.write(contents)
   return path
Example #55
    def generate_project(self, project):
        def linked_folder_id(source_set):
            return source_set.source_base.replace(os.path.sep, '.')

        def base_path(source_set):
            return os.path.join(source_set.root_dir, source_set.source_base)

        def create_source_base_template(source_set):
            source_base = base_path(source_set)
            return source_base, TemplateData(id=linked_folder_id(source_set),
                                             path=source_base)

        source_bases = dict(map(create_source_base_template, project.sources))
        if project.has_python:
            source_bases.update(
                map(create_source_base_template, project.py_sources))
            source_bases.update(
                map(create_source_base_template, project.py_libs))

        def create_source_template(base_id, includes=None, excludes=None):
            return TemplateData(
                base=base_id,
                includes='|'.join(OrderedSet(includes)) if includes else None,
                excludes='|'.join(OrderedSet(excludes)) if excludes else None,
            )

        def create_sourcepath(base_id, sources):
            def normalize_path_pattern(path):
                return '%s/' % path if not path.endswith('/') else path

            includes = [
                normalize_path_pattern(src_set.path) for src_set in sources
                if src_set.path
            ]
            excludes = []
            for source_set in sources:
                excludes.extend(
                    normalize_path_pattern(exclude)
                    for exclude in source_set.excludes)

            return create_source_template(base_id, includes, excludes)

        pythonpaths = []
        if project.has_python:
            for source_set in project.py_sources:
                pythonpaths.append(
                    create_source_template(linked_folder_id(source_set)))
            for source_set in project.py_libs:
                lib_path = source_set.path if source_set.path.endswith(
                    '.egg') else '%s/' % source_set.path
                pythonpaths.append(
                    create_source_template(linked_folder_id(source_set),
                                           includes=[lib_path]))

        configured_project = TemplateData(
            name=self.project_name,
            java=TemplateData(jdk=self.java_jdk,
                              language_level=('1.%d' %
                                              self.java_language_level)),
            python=project.has_python,
            scala=project.has_scala and not project.skip_scala,
            source_bases=source_bases.values(),
            pythonpaths=pythonpaths,
            debug_port=project.debug_port,
        )

        outdir = os.path.abspath(os.path.join(self.work_dir, 'bin'))
        safe_mkdir(outdir)

        source_sets = defaultdict(OrderedSet)  # base_id -> source_set
        for source_set in project.sources:
            source_sets[linked_folder_id(source_set)].add(source_set)
        sourcepaths = [
            create_sourcepath(base_id, sources)
            for base_id, sources in source_sets.items()
        ]

        libs = []

        def add_jarlibs(classpath_entries):
            for classpath_entry in classpath_entries:
                libs.append((classpath_entry.jar, classpath_entry.source_jar))

        add_jarlibs(project.internal_jars)
        add_jarlibs(project.external_jars)

        configured_classpath = TemplateData(
            sourcepaths=sourcepaths,
            has_tests=project.has_tests,
            libs=libs,
            scala=project.has_scala,

            # Eclipse insists the outdir be a relative path unlike other paths
            outdir=os.path.relpath(outdir, get_buildroot()),
        )

        def apply_template(output_path, template_relpath, **template_data):
            with safe_open(output_path, 'w') as output:
                Generator(pkgutil.get_data(__name__, template_relpath),
                          **template_data).write(output)

        apply_template(self.project_filename,
                       self.project_template,
                       project=configured_project)
        apply_template(self.classpath_filename,
                       self.classpath_template,
                       classpath=configured_classpath)
        apply_template(os.path.join(
            self.work_dir, 'Debug on port %d.launch' % project.debug_port),
                       self.debug_template,
                       project=configured_project)
        apply_template(self.coreprefs_filename,
                       self.coreprefs_template,
                       project=configured_project)

        for resource in _SETTINGS:
            with safe_open(os.path.join(self.cwd, '.settings', resource),
                           'w') as prefs:
                prefs.write(
                    pkgutil.get_data(
                        __name__, os.path.join('files', 'eclipse', resource)))

        factorypath = TemplateData(
            project_name=self.project_name,

            # The easiest way to make sure eclipse sees all annotation processors is to put all libs on
            # the apt factorypath - this does not seem to hurt eclipse performance in any noticeable way.
            jarpaths=libs)
        apply_template(self.apt_filename,
                       self.apt_template,
                       factorypath=factorypath)

        if project.has_python:
            apply_template(self.pydev_filename,
                           self.pydev_template,
                           project=configured_project)
        else:
            safe_delete(self.pydev_filename)

        print('\nGenerated project at %s%s' % (self.work_dir, os.sep))
Example #56
    def execute(self, targets):
        if not self._flatten and len(targets) > 1:
            topologically_sorted_targets = filter(
                JavaCompile._is_java,
                reversed(InternalTarget.sort_targets(targets)))
            for target in topologically_sorted_targets:
                self.execute([target])
            return

        self.context.log.info('Compiling targets %s' % str(targets))

        java_targets = filter(JavaCompile._is_java, targets)
        if java_targets:
            with self.context.state('classpath', []) as cp:
                for conf in self._confs:
                    cp.insert(0, (conf, self._resources_dir))
                    cp.insert(0, (conf, self._classes_dir))

                with self.changed(java_targets,
                                  invalidate_dependants=True) as changed:
                    sources_by_target, processors, fingerprint = self.calculate_sources(
                        changed)
                    if sources_by_target:
                        sources = reduce(
                            lambda all, sources: all.union(sources),
                            sources_by_target.values())
                        if not sources:
                            self.context.log.warn(
                                'Skipping java compile for targets with no sources:\n  %s'
                                % '\n  '.join(
                                    str(t) for t in sources_by_target.keys()))
                        else:
                            classpath = [
                                jar for conf, jar in cp if conf in self._confs
                            ]
                            result = self.compile(classpath, sources,
                                                  fingerprint)
                            if result != 0:
                                default_message = 'Unexpected error - %s returned %d' % (
                                    _JMAKE_MAIN, result)
                                raise TaskError(
                                    _JMAKE_ERROR_CODES.get(
                                        result, default_message))

                        if processors:
                            # Produce a monolithic apt processor service info file for further compilation rounds
                            # and the unit test classpath.
                            processor_info_file = os.path.join(
                                self._classes_dir, _PROCESSOR_INFO_FILE)
                            if os.path.exists(processor_info_file):
                                with safe_open(processor_info_file, 'r') as f:
                                    for processor in f:
                                        processors.add(processor.strip())
                            self.write_processor_info(processor_info_file,
                                                      processors)

            if self.context.products.isrequired('classes'):
                genmap = self.context.products.get('classes')

                # Map generated classes to the owning targets and sources.
                dependencies = Dependencies(self._classes_dir,
                                            self._dependencies_file)
                for target, classes_by_source in dependencies.findclasses(
                        targets).items():
                    for source, classes in classes_by_source.items():
                        genmap.add(source, self._classes_dir, classes)
                        genmap.add(target, self._classes_dir, classes)

                # TODO(John Sirois): Map target.resources in the same way
                # 'Map' (rewrite) annotation processor service info files to the owning targets.
                for target in targets:
                    if is_apt(target) and target.processors:
                        basedir = os.path.join(self._resources_dir, target.id)
                        processor_info_file = os.path.join(
                            basedir, _PROCESSOR_INFO_FILE)
                        self.write_processor_info(processor_info_file,
                                                  target.processors)
                        genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])
Example #57
 def create_product(product):
   with safe_open(os.path.join(outdir, product), mode='w') as fp:
     fp.write(product)
   return product
Example #58
 def create_product(product):
   abspath = os.path.join(outdir, product)
   with safe_open(abspath, mode='w') as fp:
     fp.write(product)
   return abspath
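Examples #57 and #58 differ only in what they return: the first hands back the relative product name, the second the absolute path that was written. Callers that immediately reopen or stat the file want the absolute-path variant.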
Example #59
 def __enter__(self):
   self._file = safe_open(self._path, *self._args, **self._kwargs)
   return self._file
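This `__enter__` belongs to a small wrapper that defers `safe_open` until the with block is entered. A sketch of what the rest of such a class plausibly looks like; the class name and `__exit__` body are assumptions:

class SafeOpenContext(object):  # hypothetical name
  def __init__(self, path, *args, **kwargs):
    self._path = path
    self._args = args
    self._kwargs = kwargs

  def __enter__(self):
    self._file = safe_open(self._path, *self._args, **self._kwargs)
    return self._file

  def __exit__(self, exc_type, exc_value, traceback):
    self._file.close()
    return False  # do not suppress exceptions raised inside the block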
Example #60
  @contextmanager  # from contextlib; required since this method yields to a with block
  def changed(self, targets, only_buildfiles=False, invalidate_dependants=False):
    """
      Yields to a with block an iterable over the targets that have changed since the last check.

      If no exceptions are thrown by work in the block, the cache is updated for the targets;
      otherwise, if a TargetError is thrown by the work in the block, all targets except those
      named in the TargetError are cached.

      :param targets: The targets to check for changes.
      :param only_buildfiles: If True, only the targets' BUILD files are checked for changes.
      :param invalidate_dependants: If True, any targets depending on changed targets are also
        invalidated.
      :returns: The subset of targets that have changed.
    """

    safe_mkdir(self._basedir)
    cache_manager = Task.CacheManager(BuildCache(self._basedir), targets, only_buildfiles)

    check = self.invalidate_for()
    if check is not None:
      with safe_open(self._extradata, 'w') as pickled:
        pickle.dump(check, pickled)

      cache_key = cache_manager.check_content(Task.EXTRA_DATA, [self._extradata])
      if cache_key:
        self.context.log.debug('invalidating all targets for %s' % self.__class__.__name__)
        for target in targets:
          cache_manager.invalidate(target, cache_key)

    for target in targets:
      cache_manager.check(target)

    if invalidate_dependants and cache_manager.changed:
      for target in (self.context.dependants(lambda t: t in cache_manager.changed.keys())).keys():
        cache_manager.invalidate(target)

    if invalidate_dependants:
      if cache_manager.foreign_invalidated_targets:
        self.context.log.info('Invalidated %d dependant targets '
                              'for the next round' % cache_manager.foreign_invalidated_targets)

      if cache_manager.changed_files:
        msg = 'Operating on %d files in %d changed targets' % (
          cache_manager.changed_files,
          len(cache_manager.changed)
        )
        if cache_manager.invalidated_files:
          msg += ' and %d files in %d invalidated dependant targets' % (
            cache_manager.invalidated_files,
            cache_manager.invalidated_targets
          )
        self.context.log.info(msg)
    elif cache_manager.changed_files:
      self.context.log.info('Operating on %d files in %d changed targets' % (
        cache_manager.changed_files,
        len(cache_manager.changed)
      ))

    try:
      yield cache_manager.changed.keys()
      for cache_keys in cache_manager.changed.values():
        for cache_key in cache_keys:
          cache_manager.update(cache_key)
    except TargetError as e:
      for target, cache_keys in cache_manager.changed.items():
        if target not in e.targets:
          for cache_key in cache_keys:
            cache_manager.update(cache_key)
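Callers drive `changed` as a context manager, as Example #56 above does. A minimal hedged usage sketch:

# Sketch: 'task' is a Task subclass instance; 'targets' comes from the caller.
with task.changed(targets, invalidate_dependants=True) as changed_targets:
  for target in changed_targets:
    process(target)  # hypothetical work; a TargetError naming failed targets
                     # lets the successfully processed ones still be cached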