Example #1
  def _generate_ivy_report(self):
    classpath = binary_utils.nailgun_profile_classpath(self, self._profile)

    reports = []
    org, name = self._ivy_utils.identify()
    xsl = os.path.join(self._cachedir, 'ivy-report.xsl')
    safe_mkdir(self._outdir, clean=True)
    for conf in self._confs:
      params = dict(
        org=org,
        name=name,
        conf=conf
      )
      xml = self._ivy_utils.xml_report_path(conf)
      out = os.path.join(self._outdir, '%(org)s-%(name)s-%(conf)s.html' % params)
      args = ['-IN', xml, '-XSL', xsl, '-OUT', out]
      self.runjava('org.apache.xalan.xslt.Process', classpath=classpath, args=args)
      reports.append(out)

    css = os.path.join(self._outdir, 'ivy-report.css')
    if os.path.exists(css):
      os.unlink(css)
    shutil.copy(os.path.join(self._cachedir, 'ivy-report.css'), self._outdir)

    if self._open:
      binary_utils.open(*reports)
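
Every example on this page leans on the same safe_mkdir helper. For orientation, here is a minimal sketch of the semantics these snippets assume (create the directory and any missing parents, treat an already-existing directory as a no-op, and empty it first when clean=True); this is illustrative only, not the canonical implementation from any of the projects below:

import errno
import os
import shutil

def safe_mkdir(directory, clean=False):
  # Sketch only: the real helpers live in the respective projects.
  if clean and os.path.exists(directory):
    # clean=True empties any previous contents.
    shutil.rmtree(directory, ignore_errors=True)
  try:
    os.makedirs(directory)
  except OSError as e:
    # An already-existing directory is fine; anything else is a real error.
    if e.errno != errno.EEXIST:
      raise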
Example #2
def test_pex_builder():
  # test with a zipped dist
  with nested(temporary_dir(), make_bdist('p1', zipped=True)) as (td, p1):
    write_pex(td, exe_main, dists=[p1])

    success_txt = os.path.join(td, 'success.txt')
    PEX(td).run(args=[success_txt])
    assert os.path.exists(success_txt)
    with open(success_txt) as fp:
      assert fp.read() == 'success'

  # test with an exploded (unzipped) dist
  with nested(temporary_dir(), temporary_dir(), make_bdist('p1', zipped=True)) as (
      td1, td2, p1):
    target_egg_dir = os.path.join(td2, os.path.basename(p1.location))
    safe_mkdir(target_egg_dir)
    with closing(zipfile.ZipFile(p1.location, 'r')) as zf:
      zf.extractall(target_egg_dir)
    p1 = DistributionHelper.distribution_from_path(target_egg_dir)

    write_pex(td1, exe_main, dists=[p1])

    success_txt = os.path.join(td1, 'success.txt')
    PEX(td1).run(args=[success_txt])
    assert os.path.exists(success_txt)
    with open(success_txt) as fp:
      assert fp.read() == 'success'
Example #3
  def _mapjars(self, genmap, target):
    """
    Parameters:
      genmap: the jar_dependencies ProductMapping entry for the required products.
      target: the target whose jar dependencies are being retrieved.
    """
    mapdir = os.path.join(self._classpath_dir, target.id)
    safe_mkdir(mapdir, clean=True)
    ivyargs = [
      '-retrieve', '%s/[organisation]/[artifact]/[conf]/'
                   '[organisation]-[artifact]-[revision](-[classifier]).[ext]' % mapdir,
      '-symlink',
      '-confs',
    ]
    ivyargs.extend(target.configurations or self._confs)
    self._exec_ivy(mapdir, [target], ivyargs)

    for org in os.listdir(mapdir):
      orgdir = os.path.join(mapdir, org)
      if os.path.isdir(orgdir):
        for name in os.listdir(orgdir):
          artifactdir = os.path.join(orgdir, name)
          if os.path.isdir(artifactdir):
            for conf in os.listdir(artifactdir):
              confdir = os.path.join(artifactdir, conf)
              for file in os.listdir(confdir):
                if self._is_jar(file):
                  # TODO(John Sirois): kill the org and (org, name) exclude mappings in favor of a
                  # conf whitelist
                  genmap.add(org, confdir).append(file)
                  genmap.add((org, name), confdir).append(file)

                  genmap.add(target, confdir).append(file)
                  genmap.add((target, conf), confdir).append(file)
                  genmap.add((org, name, conf), confdir).append(file)
Example #4
  def create_binary(self, binary):
    import platform
    safe_mkdir(self.outdir)

    jarmap = self.context.products.get('jars')

    binary_jarname = '%s.jar' % binary.basename
    binaryjarpath = os.path.join(self.outdir, binary_jarname)
    self.context.log.info('creating %s' % os.path.relpath(binaryjarpath, get_buildroot()))

    with open_jar(binaryjarpath, 'w', compression=self.compression, allowZip64=self.zip64) as jar:
      def add_jars(target):
        generated = jarmap.get(target)
        if generated:
          for basedir, jars in generated.items():
            for internaljar in jars:
              self.dump(os.path.join(basedir, internaljar), jar)

      binary.walk(add_jars, is_internal)

      if self.deployjar:
        for basedir, externaljar in self.list_jar_dependencies(binary):
          self.dump(os.path.join(basedir, externaljar), jar)

      manifest = Manifest()
      manifest.addentry(Manifest.MANIFEST_VERSION, '1.0')
      manifest.addentry(
        Manifest.CREATED_BY,
        'python %s pants %s (Twitter, Inc.)' % (platform.python_version(), get_version())
      )
      main = binary.main or '*** java -jar not supported, please use -cp and pick a main ***'
      manifest.addentry(Manifest.MAIN_CLASS, main)
      jar.writestr(Manifest.PATH, manifest.contents())

      jarmap.add(binary, self.outdir, [binary_jarname])
Example #5
  def map_internal_jars(self, targets):
    internal_jar_dir = os.path.join(self.work_dir, 'internal-libs')
    safe_mkdir(internal_jar_dir, clean=True)

    internal_source_jar_dir = os.path.join(self.work_dir, 'internal-libsources')
    safe_mkdir(internal_source_jar_dir, clean=True)

    internal_jars = self.context.products.get('jars')
    internal_source_jars = self.context.products.get('source_jars')
    for target in targets:
      mappings = internal_jars.get(target)
      if mappings:
        for base, jars in mappings.items():
          if len(jars) != 1:
            raise TaskError('Unexpected mapping, multiple jars for %s: %s' % (target, jars))

          jar = jars[0]
          cp_jar = os.path.join(internal_jar_dir, jar)
          shutil.copy(os.path.join(base, jar), cp_jar)

          cp_source_jar = None
          mappings = internal_source_jars.get(target)
          if mappings:
            for base, jars in mappings.items():
              if len(jars) != 1:
                raise TaskError(
                  'Unexpected mapping, multiple source jars for %s: %s' % (target, jars)
                )
              jar = jars[0]
              cp_source_jar = os.path.join(internal_source_jar_dir, jar)
              shutil.copy(os.path.join(base, jar), cp_source_jar)

          self._project.internal_jars.add(ClasspathEntry(cp_jar, cp_source_jar))
Example #6
def create_buildfile(root_dir, relpath, name='BUILD', content=''):
  path = os.path.join(root_dir, relpath)
  safe_mkdir(path)
  buildfile = os.path.join(path, name)
  with open(buildfile, 'a') as f:
    f.write(content)
  return BuildFile(root_dir, relpath)
Example #7
  def execute(self, targets):
    scala_targets = filter(ScalaCompile._has_scala_sources, targets)
    if scala_targets:
      safe_mkdir(self._depfile_dir)
      safe_mkdir(self._analysis_cache_dir)

      # Map from output directory to { analysis_cache_dir, [ analysis_cache_file ]}
      upstream_analysis_caches = self.context.products.get('upstream')

      with self.context.state('classpath', []) as cp:
        for conf in self._confs:
          cp.insert(0, (conf, self._resources_dir))
          for jar in self._plugin_jars:
            cp.insert(0, (conf, jar))

      with self.invalidated(scala_targets, invalidate_dependants=True,
          partition_size_hint=self._partition_size_hint) as invalidation_check:
        for vt in invalidation_check.all_vts:
          if vt.valid:  # Don't compile, just post-process.
            self.post_process(vt, upstream_analysis_caches, split_artifact=False)
        for vt in invalidation_check.invalid_vts_partitioned:
          # Compile, using partitions for efficiency.
          self.execute_single_compilation(vt, cp, upstream_analysis_caches)
          if not self.dry_run:
            vt.update()
      deps_cache = JvmDependencyCache(self, scala_targets)
      deps_cache.check_undeclared_dependencies()
Example #8
def setup_virtualenv_py(context):
  virtualenv_cache = context.config.get('python-setup', 'bootstrap_cache')
  virtualenv_target = context.config.get('python-setup', 'virtualenv_target')
  if not os.path.exists(virtualenv_cache):
    safe_mkdir(virtualenv_cache)
  if os.path.exists(os.path.join(virtualenv_target, 'virtualenv.py')):
    return True
  else:
    safe_mkdir(virtualenv_target)

  virtualenv_urls = context.config.getlist('python-setup', 'virtualenv_urls')
  tf = None
  for url in virtualenv_urls:
    try:
      ve_tgz = urlopen(url, timeout=5)
      ve_tgz_fp = StringIO(ve_tgz.read())
      ve_tgz_fp.seek(0)
      tf = tarfile.open(fileobj=ve_tgz_fp, mode='r:gz')
      break
    except Exception as e:
      context.log.warn('Failed to pull virtualenv from %s: %s' % (url, e))
      continue
  if not tf:
    raise TaskError('Could not download virtualenv!')
  try:
    tf.extractall(path=virtualenv_cache)
  except Exception as e:
    raise TaskError('Could not install virtualenv: %s' % e)
  context.log.info('Extracted %s' % url)
Example #9
  def execute(self, targets):
    catalog = self.context.products.isrequired('javadoc')
    if catalog and self.combined:
      raise TaskError('Cannot provide javadoc target mappings for combined output')

    with self.changed(filter(is_java, targets)) as changed_targets:
      safe_mkdir(self._output_dir)
      with self.context.state('classpath', []) as cp:
        classpath = [jar for conf, jar in cp if conf in self.confs]

        def find_javadoc_targets():
          if self.transitive:
            return changed_targets
          else:
            return set(changed_targets).intersection(set(self.context.target_roots))

        javadoc_targets = list(filter(is_java, find_javadoc_targets()))
        if self.combined:
          self.generate_combined(classpath, javadoc_targets)
        else:
          self.generate_individual(classpath, javadoc_targets)

    if catalog:
      for target in targets:
        gendir = self._gendir(target)
        javadocs = []
        for root, dirs, files in os.walk(gendir):
          javadocs.extend(os.path.relpath(os.path.join(root, f), gendir) for f in files)
        self.context.products.get('javadoc').add(target, gendir, javadocs)
Example #10
  def __init__(self, name, cmdline, sequence, pathspec, sandbox_dir, user=None, platform=None):
    """
      required:
        name        = name of the process
        cmdline     = cmdline of the process
        sequence    = the next available sequence number for state updates
        pathspec    = TaskPath object for synthesizing path names
        sandbox_dir = the sandbox in which to run the process
        platform    = Platform providing fork, clock, getpid

      optional:
        user        = the user to run as (if unspecified, will default to current user.)
                      if specified to a user that is not the current user, you must have root access
    """
    self._name = name
    self._cmdline = cmdline
    self._pathspec = pathspec
    self._seq = sequence
    self._sandbox = sandbox_dir
    if self._sandbox:
      safe_mkdir(self._sandbox)
    self._pid = None
    self._fork_time = None
    self._stdout = None
    self._stderr = None
    self._user = user
    if self._user:
      user, current_user = self._getpwuid() # may raise self.UnknownUserError
      if user != current_user and os.geteuid() != 0:
        raise self.PermissionError('Must be root to run processes as other users!')
    self._ckpt = None
    self._ckpt_head = -1
    if platform is None:
      raise ValueError("Platform must be specified")
    self._platform = platform
Example #11
File: runner.py Project: apache/aurora
  def control(self, force=False):
    """
      Bind to the checkpoint associated with this task, position to the end of the log if
      it exists, or create it if it doesn't.  Fails if we cannot get "leadership" i.e. a
      file lock on the checkpoint stream.
    """
    if self.is_terminal():
      raise self.StateError('Cannot take control of a task in terminal state.')
    if self._sandbox:
      safe_mkdir(self._sandbox)
    ckpt_file = self._pathspec.getpath('runner_checkpoint')
    try:
      self._ckpt = TaskRunnerHelper.open_checkpoint(ckpt_file, force=force, state=self._state)
    except TaskRunnerHelper.PermissionError:
      raise self.PermissionError('Unable to open checkpoint %s' % ckpt_file)
    log.debug('Flipping recovery mode off.')
    self._recovery = False
    self._set_task_status(self.task_state())
    self._resume_task()
    try:
      yield
    except Exception as e:
      log.error('Caught exception in self.control(): %s', e)
      log.error('  %s', traceback.format_exc())
    self._ckpt.close()
Example #12
File: http.py Project: jalons/commons
  def __init__(self, cache=None, failsoft=True, clock=time, opener=None):
    self._failsoft = failsoft
    self._cache = cache or safe_mkdtemp()
    safe_mkdir(self._cache)
    self._clock = clock
    self._opener = opener or Web()
    super(CachedWeb, self).__init__()
Example #13
  def run_thrifts(self):
    """
    Generate Python thrift code using the thrift compiler specified in pants config.

    Thrift fields conflicting with Python keywords are suffixed with a trailing
    underscore (e.g.: from_).
    """

    def is_py_thrift(target):
      return isinstance(target, PythonThriftLibrary)

    all_thrifts = set()

    def collect_sources(target):
      abs_target_base = os.path.join(get_buildroot(), target.target_base)
      for source in target.payload.sources_relative_to_buildroot():
        source_root_relative_source = os.path.relpath(source, abs_target_base)
        all_thrifts.add((target.target_base, source_root_relative_source))

    self.target.walk(collect_sources, predicate=is_py_thrift)

    copied_sources = set()
    for base, relative_source in all_thrifts:
      abs_source = os.path.join(base, relative_source)
      copied_source = os.path.join(self._workdir, relative_source)

      safe_mkdir(os.path.dirname(copied_source))
      shutil.copyfile(abs_source, copied_source)
      copied_sources.add(self._modify_thrift(copied_source))

    for src in copied_sources:
      if not self._run_thrift(src):
        raise PythonThriftBuilder.CodeGenerationException("Could not generate .py from %s!" % src)
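
The trailing-underscore renaming the docstring mentions presumably happens inside _modify_thrift, which is not shown. A small sketch of that idea, using an assumed (hypothetical) field pattern rather than the project's actual parsing:

import keyword
import re

# Hypothetical pattern: "<id>: <type> <name>" thrift field declarations.
_FIELD = re.compile(r'(\d+\s*:\s*[\w.<>, ]+\s+)(\w+)')

def suffix_python_keywords(thrift_text):
  # Rename any field whose name is a Python keyword, e.g. 'from' -> 'from_'.
  def fix(match):
    name = match.group(2)
    return match.group(1) + (name + '_' if keyword.iskeyword(name) else name)
  return _FIELD.sub(fix, thrift_text)

# suffix_python_keywords('1: string from') == '1: string from_'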
Example #14
  def execute(self):
    safe_mkdir(self.workdir)

    def jar_targets(predicate):
      return self.context.targets(predicate)

    def add_genjar(typename, target, name):
      self.context.products.get(typename).add(target, self.workdir).append(name)

    with self.context.new_workunit(name='jar-create', labels=[WorkUnit.MULTITOOL]):
      # TODO(Tejal Desai) pantsbuild/pants/65: Avoid creating 2 jars with java sources for
      # scala_library with java_sources. Currently publish fails fast if scala_library owning
      # java sources pointed by java_library target also provides an artifact. However, jar_create
      # ends up creating 2 jars one scala and other java both including the java_sources.
      if self.jar_classes:
        self._jar(jar_targets(is_jvm_library), functools.partial(add_genjar, 'jars'))

      if self.jar_sources:
        self.sourcejar(jar_targets(is_jvm_library), functools.partial(add_genjar, 'source_jars'))

      if self.jar_javadoc:
        javadoc_add_genjar = functools.partial(add_genjar, 'javadoc_jars')
        self.javadocjar(jar_targets(is_java_library),
                        self.context.products.get(javadoc.product_type),
                        javadoc_add_genjar)
        self.javadocjar(jar_targets(is_scala_library),
                        self.context.products.get(scaladoc.product_type),
                        javadoc_add_genjar)
Example #15
  def genlang(self, lang, targets):
    protobuf_binary = select_binary(
      self.protoc_supportdir,
      self.protoc_version,
      'protoc',
      self.context.config
    )

    bases, sources = self._calculate_sources(targets)

    if lang == 'java':
      safe_mkdir(self.java_out)
      gen = '--java_out=%s' % self.java_out
    elif lang == 'python':
      safe_mkdir(self.py_out)
      gen = '--python_out=%s' % self.py_out
    else:
      raise TaskError('Unrecognized protobuf gen lang: %s' % lang)

    args = [protobuf_binary, gen]

    for base in bases:
      args.append('--proto_path=%s' % base)

    args.extend(sources)
    log.debug('Executing: %s' % ' '.join(args))
    process = subprocess.Popen(args)
    result = process.wait()
    if result != 0:
      raise TaskError('%s ... exited non-zero (%i)' % (protobuf_binary, result))
Example #16
File: ivy_utils.py Project: aoen/pants
  def _generate_ivy(self, targets, jars, excludes, ivyxml, confs):
    org, name = self.identify(targets)

    # As it turns out force is not transitive - it only works for dependencies pants knows about
    # directly (declared in BUILD files - present in generated ivy.xml). The user-level ivy docs
    # don't make this clear [1], but the source code docs do (see isForce docs) [2]. I was able to
    # edit the generated ivy.xml and use the override feature [3] though and that does work
    # transitively as you'd hope.
    #
    # [1] http://ant.apache.org/ivy/history/2.3.0/settings/conflict-managers.html
    # [2] https://svn.apache.org/repos/asf/ant/ivy/core/branches/2.3.0/
    #     src/java/org/apache/ivy/core/module/descriptor/DependencyDescriptor.java
    # [3] http://ant.apache.org/ivy/history/2.3.0/ivyfile/override.html
    dependencies = [self._generate_jar_template(jar, confs) for jar in jars]
    overrides = [self._generate_override_template(dep) for dep in dependencies if dep.force]

    excludes = [self._generate_exclude_template(exclude) for exclude in excludes]

    template_data = TemplateData(
        org=org,
        module=name,
        version='latest.integration',
        publications=None,
        configurations=confs,
        dependencies=dependencies,
        excludes=excludes,
        overrides=overrides)

    safe_mkdir(os.path.dirname(ivyxml))
    with open(ivyxml, 'w') as output:
      generator = Generator(pkgutil.get_data(__name__, self._template_path),
                            root_dir=get_buildroot(),
                            lib=template_data)
      generator.write(output)
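
_generate_override_template is not included in the snippet. Given the comment above, a plausible sketch (an assumption, not necessarily the pants implementation) just lifts the forced dependency's coordinates so the template can emit an ivy <override/> element, which does apply transitively:

def _generate_override_template(self, dep):
  # Assumed shape: mirror the dependency template's coordinates for use in an
  # <override org="..." module="..." rev="..."/> element of the generated ivy.xml.
  return TemplateData(org=dep.org, module=dep.module, version=dep.version)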
Example #17
def main():
  """Anonymize a set of analysis files using the same replacements in all of them.

  This maintains enough consistency to make splitting/merging tests realistic.

  To run:

  ./pants py src/python/pants/backend/jvm/tasks/jvm_compile:anonymize_zinc_analysis \
    <wordfile> <classes dir in analysis files> <analysis file glob 1> <analysis file glob 2> ...
  """
  word_file = sys.argv[1]
  classes_dir = sys.argv[2]
  analysis_files = list(itertools.chain.from_iterable([glob.glob(p) for p in sys.argv[3:]]))

  with open(word_file, 'r') as infile:
    word_list = infile.read().split()
  anonymizer = Anonymizer(word_list)
  for analysis_file in analysis_files:
    analysis = ZincAnalysisParser(classes_dir).parse_from_path(analysis_file)
    analysis.anonymize(anonymizer)
    output_dir = os.path.join(os.path.dirname(analysis_file), 'anon')
    safe_mkdir(output_dir)
    anonymized_filename = anonymizer.convert(os.path.basename(analysis_file))
    analysis.write_to_path(os.path.join(output_dir, anonymized_filename))
  anonymizer.check_for_comprehensiveness()
Example #18
  def create(self):
    log.debug('DirectorySandbox: mkdir %s' % self.root)

    try:
      safe_mkdir(self.root)
    except (IOError, OSError) as e:
      raise self.CreationError('Failed to create the sandbox: %s' % e)

    if self._user:
      pwent, grent = self.get_user_and_group()

      try:
        # Mesos provides a sandbox directory with permission 0750 owned by the user of the executor.
        # In case of Thermos this is `root`, as Thermos takes the responsibility to drop
        # privileges to the designated non-privileged user/role. To ensure non-privileged processes
        # can still read their sandbox, Thermos must also update the permissions of the scratch
        # directory created by Mesos.
        # This is necessary since Mesos 1.6.0 (https://issues.apache.org/jira/browse/MESOS-8332).
        log.debug('DirectorySandbox: chown %s:%s %s' % (self._user, grent.gr_name, self._mesos_dir))
        os.chown(self._mesos_dir, pwent.pw_uid, pwent.pw_gid)

        log.debug('DirectorySandbox: chown %s:%s %s' % (self._user, grent.gr_name, self.root))
        os.chown(self.root, pwent.pw_uid, pwent.pw_gid)
        log.debug('DirectorySandbox: chmod 700 %s' % self.root)
        os.chmod(self.root, 0o700)
      except (IOError, OSError) as e:
        raise self.CreationError('Failed to chown/chmod the sandbox: %s' % e)
Example #19
  def genlang(self, lang, targets):
    if lang != 'java':
      raise TaskError('Unrecognized antlr gen lang: %s' % lang)

    # TODO: Instead of running the compiler for each target, collect the targets
    # by type and invoke it twice, once for antlr3 and once for antlr4.

    for target in targets:
      java_out = self._java_out(target)
      safe_mkdir(java_out)

      antlr_classpath = self._jvm_tool_bootstrapper.get_jvm_tool_classpath(target.compiler,
                                                                           self.runjava_indivisible)
      args = ["-o", java_out]

      if target.compiler == 'antlr3':
        java_main = 'org.antlr.Tool'
      elif target.compiler == 'antlr4':
        args.append("-visitor")  # Generate parse tree visitor as well
        java_main = 'org.antlr.v4.Tool'
      else:
        raise TaskError("Unknown ANTLR compiler: {}".format(target.compiler))

      sources = self._calculate_sources([target])
      args.extend(sources)
      result = self.runjava_indivisible(java_main, classpath=antlr_classpath, args=args,
                                        workunit_name='antlr')
      if result != 0:
        raise TaskError('java %s ... exited non-zero (%i)' % (java_main, result))
Example #20
  def genlang(self, lang, targets):
    bases, sources = self._calculate_sources(targets)

    if lang == 'java':
      safe_mkdir(self.java_out)
      gen = '--java_out=%s' % self.java_out
    elif lang == 'python':
      safe_mkdir(self.py_out)
      gen = '--python_out=%s' % self.py_out
    else:
      raise TaskError('Unrecognized protobuf gen lang: %s' % lang)

    args = [
      self.protobuf_binary,
      gen
    ]

    for base in bases:
      args.append('--proto_path=%s' % base)

    args.extend(sources)
    log.debug('Executing: %s' % ' '.join(args))
    process = subprocess.Popen(args)
    result = process.wait()
    if result != 0:
      raise TaskError('%s ... exited non-zero (%i)' % (self.protobuf_binary, result))
Example #21
    def _mapjars(self, genmap, target):
        mapdir = os.path.join(self._classpath_dir, target.id)
        safe_mkdir(mapdir, clean=True)
        ivyargs = [
            "-retrieve",
            "%s/[organisation]/[artifact]/[conf]/" "[organisation]-[artifact]-[revision](-[classifier]).[ext]" % mapdir,
            "-symlink",
            "-confs",
        ]
        ivyargs.extend(target.configurations or self._confs)
        self._exec_ivy(mapdir, [target], ivyargs)

        for org in os.listdir(mapdir):
            orgdir = os.path.join(mapdir, org)
            if os.path.isdir(orgdir):
                for name in os.listdir(orgdir):
                    artifactdir = os.path.join(orgdir, name)
                    if os.path.isdir(artifactdir):
                        for conf in os.listdir(artifactdir):
                            confdir = os.path.join(artifactdir, conf)
                            for file in os.listdir(confdir):
                                if self._is_jar(file):
                                    # TODO(John Sirois): kill the org and (org, name) exclude mappings in favor of a
                                    # conf whitelist
                                    genmap.add(org, confdir).append(file)
                                    genmap.add((org, name), confdir).append(file)

                                    genmap.add(target, confdir).append(file)
                                    genmap.add((target, conf), confdir).append(file)
                                    genmap.add((org, name, conf), confdir).append(file)
Example #22
    def execute(self, targets):
        def extract_resources(target):
            return target.resources if has_resources(target) else ()

        all_resources = set()
        for resources in map(extract_resources, targets):
            all_resources.update(resources)

        def target_dir(resources):
            return os.path.join(self.workdir, resources.id)

        with self.invalidated(all_resources) as invalidation_check:
            invalid_targets = set()
            for vt in invalidation_check.invalid_vts:
                invalid_targets.update(vt.targets)

            for resources in invalid_targets:
                resources_dir = target_dir(resources)
                safe_mkdir(resources_dir, clean=True)
                for resource in resources.sources:
                    basedir = os.path.dirname(resource)
                    destdir = os.path.join(resources_dir, basedir)
                    safe_mkdir(destdir)
                    shutil.copy(os.path.join(resources.target_base, resource), os.path.join(resources_dir, resource))

        genmap = self.context.products.get("resources")
        egroups = self.context.products.get_data("exclusives_groups")
        group_key = egroups.get_group_key_for_target(targets[0])

        for resources in all_resources:
            resources_dir = target_dir(resources)
            genmap.add(resources, resources_dir, resources.sources)
            for conf in self.confs:
                egroups.update_compatible_classpaths(group_key, [(conf, resources_dir)])
Example #23
  def execute(self, targets):
    java_targets = filter(_is_java, targets)
    if java_targets:
      safe_mkdir(self._classes_dir)
      safe_mkdir(self._depfile_dir)

      egroups = self.context.products.get_data('exclusives_groups')
      group_id = egroups.get_group_key_for_target(java_targets[0])
      for conf in self._confs:
        egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])
        egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])

      with self.invalidated(java_targets, invalidate_dependents=True,
                            partition_size_hint=self._partition_size_hint) as invalidation_check:
        for vt in invalidation_check.invalid_vts_partitioned:
          # Compile, using partitions for efficiency.
          exclusives_classpath = egroups.get_classpath_for_group(group_id)
          self.execute_single_compilation(vt, exclusives_classpath)
          if not self.dry_run:
            vt.update()

        for vt in invalidation_check.all_vts:
          depfile = self.create_depfile_path(vt.targets)
          if not self.dry_run and os.path.exists(depfile):
            # Read in the deps created either just now or by a previous run on these targets.
            deps = Dependencies(self._classes_dir)
            deps.load(depfile)
            self._deps.merge(deps)

      if not self.dry_run:
        if self.context.products.isrequired('classes'):
          genmap = self.context.products.get('classes')
          # Map generated classes to the owning targets and sources.
          for target, classes_by_source in self._deps.findclasses(java_targets).items():
            for source, classes in classes_by_source.items():
              genmap.add(source, self._classes_dir, classes)
              genmap.add(target, self._classes_dir, classes)

          # TODO(John Sirois): Map target.resources in the same way
          # 'Map' (rewrite) annotation processor service info files to the owning targets.
          for target in java_targets:
            if is_apt(target) and target.processors:
              basedir = os.path.join(self._resources_dir, Target.maybe_readable_identify([target]))
              processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
              self.write_processor_info(processor_info_file, target.processors)
              genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])

        # Produce a monolithic apt processor service info file for further compilation rounds
        # and the unit test classpath.
        all_processors = set()
        for target in java_targets:
          if is_apt(target) and target.processors:
            all_processors.update(target.processors)
        processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
        if os.path.exists(processor_info_file):
          with safe_open(processor_info_file, 'r') as f:
            for processor in f:
              all_processors.add(processor.strip())
        self.write_processor_info(processor_info_file, all_processors)
Example #24
  def _merge_classes_dir(self, state):
    """Merge the classes dirs from the underlying artifacts into a single dir.

    May symlink instead of copying, when it's OK to do so.

    Postcondition: symlinks are of leaf packages only.
    """
    if len(self.underlying_artifacts) <= 1:
      return
    self.log.debug('Merging classes dirs into %s' % self.classes_dir)
    symlinkable_packages = self._symlinkable_packages(state)
    for artifact in self.underlying_artifacts:
      classnames_by_package = defaultdict(list)
      for cls in state.classes_by_target.get(artifact.targets[0], []):
        classnames_by_package[os.path.dirname(cls)].append(os.path.basename(cls))

      for package, classnames in classnames_by_package.items():
        artifact_package_dir = os.path.join(artifact.classes_dir, package)
        merged_package_dir = os.path.join(self.classes_dir, package)

        if package in symlinkable_packages:
          if os.path.islink(merged_package_dir):
            assert os.readlink(merged_package_dir) == artifact_package_dir
          elif os.path.exists(merged_package_dir):
            safe_rmtree(merged_package_dir)
            os.symlink(artifact_package_dir, merged_package_dir)
          else:
            safe_mkdir(os.path.dirname(merged_package_dir))
            os.symlink(artifact_package_dir, merged_package_dir)
        else:
          safe_mkdir(merged_package_dir)
          for classname in classnames:
            src = os.path.join(artifact_package_dir, classname)
            dst = os.path.join(merged_package_dir, classname)
            self._maybe_hardlink(src, dst)
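
_maybe_hardlink is referenced but not shown. Under the merging scheme described in the docstring, a reasonable sketch (assumed, not the project's code) prefers a cheap hard link and falls back to a copy when src and dst live on different filesystems:

import errno
import os
import shutil

def _maybe_hardlink(self, src, dst):
  # Hard-link when possible; copy on a cross-device failure.
  try:
    os.link(src, dst)
  except OSError as e:
    if e.errno == errno.EXDEV:  # Invalid cross-device link.
      shutil.copy(src, dst)
    else:
      raise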
Example #25
  def execute(self, targets):
    safe_mkdir(self._output_dir)

    def jar_targets(predicate):
      return filter(predicate, (targets if self.transitive else self.context.target_roots))

    def add_genjar(typename, target, name):
      if self.context.products.isrequired(typename):
        self.context.products.get(typename).add(target, self._output_dir).append(name)

    if self.jar_classes:
      self.jar(jar_targets(is_jvm),
               self.context.products.get('classes'),
               functools.partial(add_genjar, 'jars'))

    if self.jar_idl:
      self.idljar(jar_targets(is_idl), functools.partial(add_genjar, 'idl_jars'))

    if self.jar_sources:
      self.sourcejar(jar_targets(is_jvm), functools.partial(add_genjar, 'source_jars'))

    if self.jar_javadoc:
      self.javadocjar(jar_targets(is_java),
                      self.context.products.get('javadoc'),
                      functools.partial(add_genjar, 'javadoc_jars'))
Example #26
  def execute(self, targets):
    scala_targets = filter(ScalaCompile._has_scala_sources, targets)
    if scala_targets:
      safe_mkdir(self._depfile_dir)
      safe_mkdir(self._analysis_cache_dir)

      # Map from output directory to { analysis_cache_dir, [ analysis_cache_file ]}
      upstream_analysis_caches = self.context.products.get('upstream')

      with self.context.state('classpath', []) as cp:
        for conf in self._confs:
          cp.insert(0, (conf, self._resources_dir))

      with self.invalidated(scala_targets, invalidate_dependants=True) as invalidated:
        if self._flatten:
          # We must defer invalidation to zinc. If we exclude files from a repeat build, zinc will assume
          # the files were deleted and will nuke the corresponding class files. So we build all_targets
          # in one pass and let zinc figure it out.
          self.execute_single_compilation(invalidated.combined_all_versioned_targets(), cp, upstream_analysis_caches)
        else:
          # We must pass all targets, even valid ones, to execute_single_compilation(), so it can
          # track the deps and the upstream analysis map correctly.
          for vt in invalidated.all_versioned_targets():
            self.execute_single_compilation(vt, cp, upstream_analysis_caches)
            invalidated.update_versioned_target(vt)
Example #27
    def __init__(self, target, root_dir, extra_targets=None):
        self._config = Config.load()
        self._target = target
        self._root = root_dir
        self._cache = BuildCache(
            os.path.join(self._config.get("python-setup", "artifact_cache"), "%s" % PythonIdentity.get())
        )
        self._extra_targets = list(extra_targets) if extra_targets is not None else []
        self._extra_targets.append(self._get_common_python())

        cachedir = self._config.get("python-setup", "cache")
        safe_mkdir(cachedir)
        self._eggcache = cachedir

        local_repo = "file://%s" % os.path.realpath(cachedir)
        self._repos = [local_repo] + self._config.getlist("python-setup", "repos")
        self._fetcher = ReqFetcher(repos=self._repos, cache=cachedir)
        self._index = None
        for index in self._config.getlist("python-setup", "indices"):
            if PythonChroot.can_contact_index(index):
                self._index = index
                break
        self._additional_reqs = set()

        distdir = self._config.getdefault("pants_distdir")
        distpath = tempfile.mktemp(dir=distdir, prefix=target.name)
        self.env = PythonEnvironment(distpath)
Example #28
File: jaxb_gen.py Project: aoen/pants
  def genlang(self, lang, targets):
    if lang != 'java':
      raise TaskError('Unrecognized jaxb language: %s' % lang)
    output_dir = os.path.join(self.workdir, 'gen-java')
    safe_mkdir(output_dir)
    cache = []

    for target in targets:
      if not isinstance(target, JaxbLibrary):
        raise TaskError('Invalid target type "{class_type}" (expected JaxbLibrary)'
                        .format(class_type=type(target).__name__))

      target_files = []
      for source in target.sources_relative_to_buildroot():
        path_to_xsd = source
        output_package = target.package

        if output_package is None:
          output_package = self._guess_package(source)
        output_package = self._correct_package(output_package)

        output_directory = output_dir
        safe_mkdir(output_directory)
        args = ['-p', output_package, '-d', output_directory, path_to_xsd]
        result = self._compile_schema(args)

        if result != 0:
          raise TaskError('xjc ... exited non-zero ({code})'.format(code=result))
        target_files.append(self._sources_to_be_generated(target.package, path_to_xsd))
      cache.append((target, target_files))

    return cache
Example #29
  def execute_codegen(self, targets):
    sources = self._calculate_sources(targets, lambda t: isinstance(t, SpindleThriftLibrary))
    bases = set(
      target.target_base
      for target in self.context.targets(lambda t: isinstance(t, SpindleThriftLibrary))
    )
    scalate_workdir = os.path.join(self.workdir, 'scalate_workdir')
    safe_mkdir(self.namespace_out)
    safe_mkdir(scalate_workdir)

    args = [
      '--template', 'scala/record.ssp',
      '--java_template', 'javagen/record.ssp',
      '--thrift_include', ':'.join(bases),
      '--namespace_out', self.namespace_out,
      '--working_dir', scalate_workdir,
    ]
    args.extend(sources)

    result = self.runjava(classpath=self.spindle_classpath,
                          main='com.foursquare.spindle.codegen.binary.ThriftCodegen',
                          jvm_options=self.get_options().jvm_options,
                          args=args,
                          workunit_name='generate')
    if result != 0:
      raise TaskError('{} returned {}'.format(self.main_class, result))
Example #30
File: ivy_utils.py Project: aoen/pants
  def symlink_cachepath(ivy_home, inpath, symlink_dir, outpath):
    """Symlinks all paths listed in inpath that are under ivy_home into symlink_dir.

    Preserves all other paths. Writes the resulting paths to outpath.
    Returns a map of path -> symlink to that path.
    """
    safe_mkdir(symlink_dir)
    with safe_open(inpath, 'r') as infile:
      paths = filter(None, infile.read().strip().split(os.pathsep))
    new_paths = []
    for path in paths:
      if not path.startswith(ivy_home):
        new_paths.append(path)
        continue
      symlink = os.path.join(symlink_dir, os.path.relpath(path, ivy_home))
      try:
        os.makedirs(os.path.dirname(symlink))
      except OSError as e:
        if e.errno != errno.EEXIST:
          raise
      # Note: The try blocks cannot be combined. It may be that the dir exists but the link doesn't.
      try:
        os.symlink(path, symlink)
      except OSError as e:
        # We don't delete and recreate the symlink, as this may break concurrently executing code.
        if e.errno != errno.EEXIST:
          raise
      new_paths.append(symlink)
    with safe_open(outpath, 'w') as outfile:
      outfile.write(':'.join(new_paths))
    symlink_map = dict(zip(paths, new_paths))
    return symlink_map
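
To make the contract concrete, a hypothetical invocation (every path below is invented for illustration):

# classpath.txt holds absolute paths joined by os.pathsep; entries under
# ivy_home come back as symlinks beneath symlink_dir, everything else is
# passed through, and the rewritten list is written to the outpath file.
symlink_map = symlink_cachepath(ivy_home='/home/alice/.ivy2',
                                inpath='/tmp/classpath.txt',
                                symlink_dir='/tmp/ivy-symlinks',
                                outpath='/tmp/classpath.out.txt')
for real_path, link in symlink_map.items():
  print('%s -> %s' % (real_path, link))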
Example #31
def _initialize_disk_logging():
    safe_mkdir(LogOptions.log_dir())
Example #32
  def __init__(self, root):
    self._root = os.path.join(root, GLOBAL_CACHE_KEY_GEN_VERSION)
    safe_mkdir(self._root)
Example #33
    def execute(self, targets):
        java_targets = filter(_is_java, targets)
        if java_targets:
            safe_mkdir(self._classes_dir)
            safe_mkdir(self._depfile_dir)

            with self.context.state('classpath', []) as cp:
                for conf in self._confs:
                    cp.insert(0, (conf, self._resources_dir))
                    cp.insert(0, (conf, self._classes_dir))

            with self.invalidated(java_targets,
                                  invalidate_dependents=True,
                                  partition_size_hint=self._partition_size_hint
                                  ) as invalidation_check:
                for vt in invalidation_check.invalid_vts_partitioned:
                    # Compile, using partitions for efficiency.
                    self.execute_single_compilation(vt, cp)
                    if not self.dry_run:
                        vt.update()

                for vt in invalidation_check.all_vts:
                    depfile = self.create_depfile_path(vt.targets)
                    if not self.dry_run and os.path.exists(depfile):
                        # Read in the deps created either just now or by a previous run on these targets.
                        deps = Dependencies(self._classes_dir)
                        deps.load(depfile)
                        self._deps.merge(deps)

            if not self.dry_run:
                if self.context.products.isrequired('classes'):
                    genmap = self.context.products.get('classes')
                    # Map generated classes to the owning targets and sources.
                    for target, classes_by_source in self._deps.findclasses(
                            java_targets).items():
                        for source, classes in classes_by_source.items():
                            genmap.add(source, self._classes_dir, classes)
                            genmap.add(target, self._classes_dir, classes)

                    # TODO(John Sirois): Map target.resources in the same way
                    # 'Map' (rewrite) annotation processor service info files to the owning targets.
                    for target in java_targets:
                        if is_apt(target) and target.processors:
                            basedir = os.path.join(
                                self._resources_dir,
                                Target.maybe_readable_identify([target]))
                            processor_info_file = os.path.join(
                                basedir, _PROCESSOR_INFO_FILE)
                            self.write_processor_info(processor_info_file,
                                                      target.processors)
                            genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])

                # Produce a monolithic apt processor service info file for further compilation rounds
                # and the unit test classpath.
                all_processors = set()
                for target in java_targets:
                    if is_apt(target) and target.processors:
                        all_processors.update(target.processors)
                processor_info_file = os.path.join(self._classes_dir,
                                                   _PROCESSOR_INFO_FILE)
                if os.path.exists(processor_info_file):
                    with safe_open(processor_info_file, 'r') as f:
                        for processor in f:
                            all_processors.add(processor.strip())
                self.write_processor_info(processor_info_file, all_processors)
Example #34
    def generate_project(self, project):
        def linked_folder_id(source_set):
            return source_set.source_base.replace(os.path.sep, '.')

        def base_path(source_set):
            return os.path.join(source_set.root_dir, source_set.source_base)

        def create_source_base_template(source_set):
            source_base = base_path(source_set)
            return source_base, TemplateData(id=linked_folder_id(source_set),
                                             path=source_base)

        source_bases = dict(map(create_source_base_template, project.sources))
        if project.has_python:
            source_bases.update(
                map(create_source_base_template, project.py_sources))
            source_bases.update(
                map(create_source_base_template, project.py_libs))

        def create_source_template(base_id, includes=None, excludes=None):
            return TemplateData(
                base=base_id,
                includes='|'.join(OrderedSet(includes)) if includes else None,
                excludes='|'.join(OrderedSet(excludes)) if excludes else None,
            )

        def create_sourcepath(base_id, sources):
            def normalize_path_pattern(path):
                return '%s/' % path if not path.endswith('/') else path

            includes = [
                normalize_path_pattern(src_set.path) for src_set in sources
                if src_set.path
            ]
            excludes = []
            for source_set in sources:
                excludes.extend(
                    normalize_path_pattern(exclude)
                    for exclude in source_set.excludes)

            return create_source_template(base_id, includes, excludes)

        pythonpaths = []
        if project.has_python:
            for source_set in project.py_sources:
                pythonpaths.append(
                    create_source_template(linked_folder_id(source_set)))
            for source_set in project.py_libs:
                lib_path = source_set.path if source_set.path.endswith(
                    '.egg') else '%s/' % source_set.path
                pythonpaths.append(
                    create_source_template(linked_folder_id(source_set),
                                           includes=[lib_path]))

        configured_project = TemplateData(
            name=self.project_name,
            java=TemplateData(jdk=self.java_jdk,
                              language_level=('1.%d' %
                                              self.java_language_level)),
            python=project.has_python,
            scala=project.has_scala and not project.skip_scala,
            source_bases=source_bases.values(),
            pythonpaths=pythonpaths,
            debug_port=project.debug_port,
        )

        outdir = os.path.abspath(os.path.join(self.work_dir, 'bin'))
        safe_mkdir(outdir)

        source_sets = defaultdict(OrderedSet)  # base_id -> source_set
        for source_set in project.sources:
            source_sets[linked_folder_id(source_set)].add(source_set)
        sourcepaths = [
            create_sourcepath(base_id, sources)
            for base_id, sources in source_sets.items()
        ]

        libs = []

        def add_jarlibs(classpath_entries):
            for classpath_entry in classpath_entries:
                # TODO(John Sirois): Plumb javadoc jars
                libs.append((classpath_entry.jar, classpath_entry.source_jar))

        add_jarlibs(project.internal_jars)
        add_jarlibs(project.external_jars)

        configured_classpath = TemplateData(
            sourcepaths=sourcepaths,
            has_tests=project.has_tests,
            libs=libs,
            scala=project.has_scala,

            # Eclipse insists the outdir be a relative path unlike other paths
            outdir=os.path.relpath(outdir, get_buildroot()),
        )

        def apply_template(output_path, template_relpath, **template_data):
            with safe_open(output_path, 'w') as output:
                Generator(pkgutil.get_data(__name__, template_relpath),
                          **template_data).write(output)

        apply_template(self.project_filename,
                       self.project_template,
                       project=configured_project)
        apply_template(self.classpath_filename,
                       self.classpath_template,
                       classpath=configured_classpath)
        apply_template(os.path.join(
            self.work_dir, 'Debug on port %d.launch' % project.debug_port),
                       self.debug_template,
                       project=configured_project)
        apply_template(self.coreprefs_filename,
                       self.coreprefs_template,
                       project=configured_project)

        for resource in _SETTINGS:
            with safe_open(os.path.join(self.cwd, '.settings', resource),
                           'w') as prefs:
                prefs.write(
                    pkgutil.get_data(
                        __name__, os.path.join('files', 'eclipse', resource)))

        factorypath = TemplateData(
            project_name=self.project_name,

            # The easiest way to make sure eclipse sees all annotation processors is to put all libs on
            # the apt factorypath - this does not seem to hurt eclipse performance in any noticeable way.
            jarpaths=libs)
        apply_template(self.apt_filename,
                       self.apt_template,
                       factorypath=factorypath)

        if project.has_python:
            apply_template(self.pydev_filename,
                           self.pydev_template,
                           project=configured_project)
        else:
            safe_delete(self.pydev_filename)

        print('\nGenerated project at %s%s' % (self.work_dir, os.sep))
Example #35
  def open(self):
    """Implementation of Reporter callback."""
    safe_mkdir(os.path.dirname(self._html_dir))
    self._report_file = open(self.report_path(), 'w')
Example #36
  def makedirs(cls, path):
    safe_mkdir(os.path.join(BuildFileTest.root_dir, path))
Example #37
File: sandbox.py Project: brinick/aurora
  def do_mount(source, destination):
    safe_mkdir(destination)
    log.info('Mounting %s into task filesystem at %s.' % (source, destination))
    subprocess.check_call(['mount', '--bind', source, destination])
Example #38
def setup_sandbox(td, taskpath):
    sandbox = os.path.join(td, 'sandbox')
    safe_mkdir(sandbox)
    safe_mkdir(taskpath.getpath('process_logbase'))
    safe_mkdir(os.path.dirname(taskpath.getpath('process_checkpoint')))
    return sandbox
Example #39
  def force_invalidate_all(self):
    """Force-invalidates all cached items."""
    safe_mkdir(self._root, clean=True)
Example #40
    def process(self,
                outdir,
                base,
                source,
                fragmented,
                url_builder,
                get_config,
                css=None):
        def parse_url(spec):
            match = MarkdownToHtml.PANTS_LINK.match(spec)
            if match:
                page = Target.get(
                    Address.parse(get_buildroot(), match.group(1)))
                anchor = match.group(2) or ''
                if not page:
                    raise TaskError('Invalid link %s' % match.group(1))
                alias, url = url_builder(page, config=get_config(page))
                return alias, url + anchor
            else:
                return spec, spec

        def build_url(label):
            components = label.split('|', 1)
            if len(components) == 1:
                return parse_url(label.strip())
            else:
                alias, link = components
                _, url = parse_url(link.strip())
                return alias, url

        wikilinks = WikilinksExtension(build_url)

        path, ext = os.path.splitext(source)
        output_path = os.path.join(outdir, path + '.html')
        safe_mkdir(os.path.dirname(output_path))
        with codecs.open(output_path, 'w', 'utf-8') as output:
            with codecs.open(os.path.join(get_buildroot(), base, source), 'r',
                             'utf-8') as input:
                md_html = markdown.markdown(
                    input.read(),
                    extensions=[
                        'codehilite(guess_lang=False)', 'extra', 'tables',
                        'toc', wikilinks
                    ],
                )
                if fragmented:
                    if css:
                        with safe_open(css) as fd:
                            output.write(
                                textwrap.dedent('''
              <style type="text/css">
              %s
              </style>
              ''').strip() % fd.read())
                            output.write('\n')
                    output.write(md_html)
                else:
                    if css:
                        css_relpath = os.path.relpath(css, outdir)
                        out_relpath = os.path.dirname(source)
                        link_relpath = os.path.relpath(css_relpath,
                                                       out_relpath)
                        css = '<link rel="stylesheet" type="text/css" href="%s"/>' % link_relpath
                    html = textwrap.dedent('''
          <html>
            <head>
              <meta charset="utf-8">
              %s
            </head>
            <body>
          <!-- generated by pants! -->
          %s
            </body>
          </html>
          ''').strip() % (css or '', md_html)
                    output.write(html)
                return output.name
Example #41
    def execute(self, targets):
        self.check_clean_master(commit=(not self._dryrun and self._commit))

        safe_mkdir(self._output_dir)

        self.gem(filter(is_ruby_library, self.context.target_roots))
Example #42
  def write(package, name, content):
    package_path = os.path.join(td, SetupPy.SOURCE_ROOT, to_path(package))
    safe_mkdir(os.path.dirname(os.path.join(package_path, name)))
    with open(os.path.join(package_path, name), 'w') as fp:
      fp.write(content)
Example #43
    def generate_project(self, project):
        def linked_folder_id(path):
            return path.replace(os.path.sep, '.')

        def base_path(source_set):
            return os.path.join(source_set.root_dir, source_set.source_base)

        source_bases = {}

        def add_source_base(path, id):
            source_bases[path] = id

        for source_set in project.sources:
            add_source_base(base_path(source_set),
                            linked_folder_id(source_set.source_base))
        if project.has_python:
            for source_set in project.py_sources:
                add_source_base(base_path(source_set),
                                linked_folder_id(source_set.source_base))
            for source_set in project.py_libs:
                add_source_base(base_path(source_set),
                                linked_folder_id(source_set.source_base))

        def create_source_template(base, includes=None, excludes=None):
            return TemplateData(
                base=source_bases[base],
                includes=includes or [],
                excludes=excludes or [],
                joined_includes='|'.join(includes) if includes else '',
                joined_excludes='|'.join(excludes) if excludes else '',
            )

        def create_sourcepath(base, sources):
            def normalize_path_pattern(path):
                return '%s/' % path if not path.endswith('/') else path

            includes = [
                normalize_path_pattern(src_set.path) for src_set in sources
                if src_set.path
            ]
            excludes = []
            for source_set in sources:
                excludes.extend(
                    normalize_path_pattern(exclude)
                    for exclude in source_set.excludes)

            return create_source_template(base, includes, excludes)

        pythonpaths = []
        if project.has_python:
            for source_set in project.py_sources:
                pythonpaths.append(
                    create_source_template(base_path(source_set)))
            for source_set in project.py_libs:
                lib_path = source_set.path if source_set.path.endswith(
                    '.egg') else '%s/' % source_set.path
                pythonpaths.append(
                    create_source_template(base_path(source_set),
                                           includes=[lib_path]))

        source_bases_list = [{
            'path': path,
            'id': id
        } for (path, id) in source_bases.items()]
        configured_project = TemplateData(
            name=self.project_name,
            has_python=project.has_python,
            has_scala=project.has_scala and not project.skip_scala,
            source_bases=source_bases_list,
            pythonpaths=pythonpaths,
            debug_port=project.debug_port,
        )

        outdir = os.path.abspath(os.path.join(self.work_dir, 'bin'))
        safe_mkdir(outdir)

        source_sets = defaultdict(OrderedSet)  # base -> source_set
        for source_set in project.sources:
            source_sets[base_path(source_set)].add(source_set)
        sourcepaths = [
            create_sourcepath(base, sources)
            for base, sources in source_sets.items()
        ]

        libs = []

        def add_jarlibs(classpath_entries):
            for classpath_entry in classpath_entries:
                jar = classpath_entry.jar
                source_jar = classpath_entry.source_jar
                libs.append(
                    TemplateData(
                        jar=os.path.relpath(jar, self.cwd),
                        source_jar=os.path.relpath(source_jar, self.cwd)
                        if source_jar else None))

        add_jarlibs(project.internal_jars)
        add_jarlibs(project.external_jars)

        configured_classpath = TemplateData(
            sourcepaths=sourcepaths,
            has_tests=project.has_tests,
            libs=libs,
            has_scala=project.has_scala,
            outdir=os.path.relpath(outdir, get_buildroot()),
        )

        with safe_open(self.project_filename, 'w') as output:
            Generator(pkgutil.get_data(__name__, self.project_template),
                      project=configured_project).write(output)

        with safe_open(self.classpath_filename, 'w') as output:
            Generator(pkgutil.get_data(__name__, self.classpath_template),
                      classpath=configured_classpath).write(output)

        debug_filename = os.path.join(
            self.work_dir, 'Debug on port %d.launch' % project.debug_port)
        with safe_open(debug_filename, 'w') as output:
            Generator(pkgutil.get_data(__name__, self.debug_template),
                      project=configured_project).write(output)

        for resource in _SETTINGS:
            with safe_open(os.path.join(self.cwd, '.settings', resource),
                           'w') as prefs:
                prefs.write(
                    pkgutil.get_data(
                        __name__, os.path.join('eclipse', 'files', resource)))

        factorypath = TemplateData(
            project_name=self.project_name,

            # The easiest way to make sure eclipse sees all annotation processors is to put all libs on
            # the apt factorypath - this does not seem to hurt eclipse performance in any noticeable way.
            jarpaths=[
                "('%s', %s)" % (lib.jar, "'%s'" %
                                lib.source_jar if lib.source_jar else 'None')
                for lib in libs
            ])
        with open(self.apt_filename, 'w') as output:
            Generator(pkgutil.get_data(__name__, self.apt_template),
                      factorypath=factorypath).write(output)

        if project.has_python:
            with safe_open(self.pydev_filename, 'w') as output:
                Generator(pkgutil.get_data(__name__, self.pydev_template),
                          project=configured_project).write(output)
        else:
            if os.path.exists(self.pydev_filename):
                os.remove(self.pydev_filename)

        print('\nGenerated project at %s%s' % (self.work_dir, os.sep))
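
A note on the helper every snippet on this page leans on: a minimal sketch of the behavior these call sites assume from `safe_mkdir` (recursive creation, idempotent, optional `clean=True` wipe). This is an illustration consistent with the usages shown here, not the actual twitter.commons implementation:

import os
import shutil

def safe_mkdir(directory, clean=False):
    # When clean=True, wipe any existing contents first (see the
    # safe_mkdir(..., clean=True) call sites in these examples).
    if clean:
        shutil.rmtree(directory, ignore_errors=True)
    try:
        os.makedirs(directory)
    except OSError:
        # The directory already exists (or we raced another process);
        # only re-raise if something other than a directory holds the path.
        if not os.path.isdir(directory):
            raise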
Example #44
    def execute_single_compilation(self, versioned_target_set, cp,
                                   upstream_analysis_caches):
        """Execute a single compilation, updating upstream_analysis_caches if needed."""
        if self._flatten:
            compilation_id = 'flat'
            output_dir = self._flat_classes_dir
        else:
            compilation_id = Target.maybe_readable_identify(
                versioned_target_set.targets)
            # Each compilation must output to its own directory, so zinc can then associate those with the appropriate
            # analysis caches of previous compilations. We then copy the results out to the real output dir.
            output_dir = os.path.join(self._incremental_classes_dir,
                                      compilation_id)

        depfile = os.path.join(self._depfile_dir,
                               compilation_id) + '.dependencies'
        analysis_cache = os.path.join(self._analysis_cache_dir,
                                      compilation_id) + '.analysis_cache'

        safe_mkdir(output_dir)

        if not versioned_target_set.valid:
            with self.check_artifact_cache(
                    versioned_target_set,
                    build_artifacts=[output_dir, depfile, analysis_cache],
                    artifact_root=self._workdir) as needs_building:
                if needs_building:
                    self.context.log.info('Compiling targets %s' %
                                          versioned_target_set.targets)
                    sources_by_target = self.calculate_sources(
                        versioned_target_set.targets)
                    if sources_by_target:
                        sources = reduce(
                            lambda all, sources: all.union(sources),
                            sources_by_target.values())
                        if not sources:
                            # Create an empty depfile, since downstream code may assume that one exists.
                            touch(depfile)
                            self.context.log.warn(
                                'Skipping scala compile for targets with no sources:\n  %s'
                                % '\n  '.join(
                                    str(t) for t in sources_by_target.keys()))
                        else:
                            classpath = [
                                jar for conf, jar in cp if conf in self._confs
                            ]
                            result = self.compile(classpath, sources,
                                                  output_dir, analysis_cache,
                                                  upstream_analysis_caches,
                                                  depfile)
                            if result != 0:
                                raise TaskError('%s returned %d' %
                                                (self._main, result))

        # Note that the following post-processing steps must happen even for valid targets.

        # Read in the deps created either just now or by a previous compiler run on these targets.
        if self.context.products.isrequired('classes'):
            self.context.log.debug('Reading dependencies from ' + depfile)
            deps = Dependencies(output_dir)
            deps.load(depfile)

            genmap = self.context.products.get('classes')

            for target, classes_by_source in deps.findclasses(
                    versioned_target_set.targets).items():
                for source, classes in classes_by_source.items():
                    genmap.add(source, output_dir, classes)
                    genmap.add(target, output_dir, classes)

            # TODO(John Sirois): Map target.resources in the same way
            # Create and Map scala plugin info files to the owning targets.
            for target in versioned_target_set.targets:
                if is_scalac_plugin(target) and target.classname:
                    basedir = self.write_plugin_info(target)
                    genmap.add(target, basedir, [_PLUGIN_INFO_FILE])

        # Update the upstream analysis map.
        analysis_cache_parts = os.path.split(analysis_cache)
        if not upstream_analysis_caches.has(output_dir):
            # A previous chunk might have already updated this. It is certainly possible for a later chunk to
            # independently depend on some target that a previous chunk already built.
            upstream_analysis_caches.add(output_dir, analysis_cache_parts[0],
                                         [analysis_cache_parts[1]])

        # Update the classpath.
        with self.context.state('classpath', []) as cp:
            for conf in self._confs:
                cp.insert(0, (conf, output_dir))
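
One step above folds the per-target source sets into a single set with `reduce`; note that Python 3 moves `reduce` into `functools`. A self-contained equivalent of that fold, with hypothetical inputs standing in for calculate_sources() output:

from functools import reduce

# Hypothetical stand-in for the sources_by_target mapping.
sources_by_target = {'lib-a': {'A.scala'}, 'lib-b': {'A.scala', 'B.scala'}}

sources = reduce(lambda acc, srcs: acc.union(srcs), sources_by_target.values(), set())
assert sources == {'A.scala', 'B.scala'}

# set().union(*...) expresses the same fold without reduce.
assert set().union(*sources_by_target.values()) == sources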
Example #45
    def bundle(self, app):
        """Create a self-contained application bundle containing the target
    classes, dependencies and resources.
    """
        assert (isinstance(app, BundleCreate.App))

        bundledir = os.path.join(self.outdir, '%s-bundle' % app.basename)
        self.context.log.info('creating %s' %
                              os.path.relpath(bundledir, get_buildroot()))

        safe_mkdir(bundledir, clean=True)

        classpath = OrderedSet()
        if not self.deployjar:
            libdir = os.path.join(bundledir, 'libs')
            os.mkdir(libdir)

            # Add internal dependencies to the bundle.
            def add_jars(target):
                target_jars = self.context.products.get('jars').get(target)
                if target_jars is not None:
                    for basedir, jars in target_jars.items():
                        for internaljar in jars:
                            os.symlink(os.path.join(basedir, internaljar),
                                       os.path.join(libdir, internaljar))
                            classpath.add(internaljar)

            app.binary.walk(add_jars, lambda t: t.is_internal)

            # Add external dependencies to the bundle.
            for basedir, externaljar in self.list_jar_dependencies(app.binary):
                path = os.path.join(basedir, externaljar)
                os.symlink(path, os.path.join(libdir, externaljar))
                classpath.add(externaljar)

        for basedir, jars in self.context.products.get('jars').get(
                app.binary).items():
            if len(jars) != 1:
                raise TaskError(
                    'Expected 1 mapped binary for %s but found: %s' %
                    (app.binary, jars))

            binary = jars[0]
            binary_jar = os.path.join(basedir, binary)
            bundle_jar = os.path.join(bundledir, binary)
            if not classpath:
                os.symlink(binary_jar, bundle_jar)
            else:
                with open_zip(binary_jar, 'r') as src:
                    with open_zip(bundle_jar, 'w',
                                  compression=ZIP_DEFLATED) as dest:
                        for item in src.infolist():
                            buf = src.read(item.filename)
                            if Manifest.PATH == item.filename:
                                manifest = Manifest(buf)
                                manifest.addentry(
                                    Manifest.CLASS_PATH, ' '.join(
                                        os.path.join('libs', jar)
                                        for jar in classpath))
                                buf = manifest.contents()
                            dest.writestr(item, buf)

        for bundle in app.bundles:
            for path, relpath in bundle.filemap.items():
                bundlepath = os.path.join(bundledir, relpath)
                safe_mkdir(os.path.dirname(bundlepath))
                os.symlink(path, bundlepath)

        return bundledir
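
The deflated-copy loop above rewrites the binary jar's manifest so its Class-Path points at the symlinked libs. A stdlib-only sketch of the same idea, assuming a plain-text META-INF/MANIFEST.MF entry and ignoring the 72-byte line wrapping real manifests require:

import zipfile

def rewrite_manifest_classpath(binary_jar, bundle_jar, classpath):
    with zipfile.ZipFile(binary_jar, 'r') as src, \
         zipfile.ZipFile(bundle_jar, 'w', compression=zipfile.ZIP_DEFLATED) as dest:
        for item in src.infolist():
            buf = src.read(item.filename)
            if item.filename == 'META-INF/MANIFEST.MF':
                # Append a Class-Path attribute referencing the bundled libs.
                text = buf.decode('utf-8').rstrip('\n')
                text += '\nClass-Path: %s\n' % ' '.join('libs/%s' % jar for jar in classpath)
                buf = text.encode('utf-8')
            dest.writestr(item, buf)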
Example #46
 def __init__(self, dir=None):
   def init_stat():
     return CacheStat([], [])
   self.stats_per_cache = defaultdict(init_stat)
   self._dir = dir
   safe_mkdir(self._dir)
Example #47
  def generate_project(self, project):
    def create_content_root(source_set):
      root_relative_path = os.path.join(source_set.source_base, source_set.path) \
                           if source_set.path else source_set.source_base
      return TemplateData(
        path = root_relative_path,
        sources = [ TemplateData(
          path = root_relative_path,
          package_prefix = source_set.path.replace('/', '.') if source_set.path else None,
          is_test = source_set.is_test,
        ) ],
        exclude_paths = [ os.path.join(source_set.source_base, x) for x in source_set.excludes ],
      )

    content_roots = [create_content_root(source_set) for source_set in project.sources]
    if project.has_python:
      content_roots.extend(create_content_root(source_set) for source_set in project.py_sources)

    configured_module = TemplateData(
      root_dir = get_buildroot(),
      path = self.module_filename,
      content_roots = content_roots,
      has_bash = self.bash,
      has_python = project.has_python,
      has_scala = project.has_scala,
      has_tests = project.has_tests,
      internal_jars = [cp_entry.jar for cp_entry in project.internal_jars],
      internal_source_jars = [cp_entry.source_jar for cp_entry in project.internal_jars
                              if cp_entry.source_jar],
      external_jars = [cp_entry.jar for cp_entry in project.external_jars],
      external_source_jars = [cp_entry.source_jar for cp_entry in project.external_jars
                              if cp_entry.source_jar],
      extra_components = [],
    )

    outdir = os.path.abspath(self.intellij_output_dir)
    if not os.path.exists(outdir):
      os.makedirs(outdir)

    configured_project = TemplateData(
      root_dir = get_buildroot(),
      outdir = outdir,
      modules = [ configured_module ],
      java_encoding = self.java_encoding,
      resource_extensions = self._get_resource_extensions(project),
      has_scala = project.has_scala,
      scala_compiler_classpath = project.scala_compiler_classpath,
      scala = TemplateData(fsc = self.fsc) if project.has_scala else None,
      checkstyle_suppression_files = ','.join(project.checkstyle_suppression_files),
      checkstyle_classpath = ';'.join(project.checkstyle_classpath),
      debug_port=project.debug_port,
      extra_components = [],
    )

    if not self.nomerge:
      # Grab the existing components, which may include customized ones.
      existing_project_components = self._parse_xml_component_elements(self.project_filename)
      existing_module_components = self._parse_xml_component_elements(self.module_filename)

    # Generate (without merging in any extra components).
    safe_mkdir(os.path.abspath(self.intellij_output_dir))

    ipr = self._generate_to_tempfile(
        Generator(pkgutil.get_data(__name__, self.project_template), project = configured_project))
    iml = self._generate_to_tempfile(
        Generator(pkgutil.get_data(__name__, self.module_template), module = configured_module))

    if not self.nomerge:
      # Get the names of the components we generated, and then delete the
      # generated files.  Clunky, but performance is not an issue, and this
      # is an easy way to get those component names from the templates.
      extra_project_components = self._get_components_to_merge(existing_project_components, ipr)
      extra_module_components = self._get_components_to_merge(existing_module_components, iml)
      os.remove(ipr)
      os.remove(iml)

      # Generate again, with the extra components.
      ipr = self._generate_to_tempfile(Generator(pkgutil.get_data(__name__, self.project_template),
          project = configured_project.extend(extra_components = extra_project_components)))
      iml = self._generate_to_tempfile(Generator(pkgutil.get_data(__name__, self.module_template),
          module = configured_module.extend(extra_components = extra_module_components)))

    shutil.move(ipr, self.project_filename)
    shutil.move(iml, self.module_filename)

    print('\nGenerated project at %s%s' % (self.work_dir, os.sep))

    return self.project_filename if self.open else None
Example #48
    def create_dir(cls, relpath):
        """Creates a directory under the buildroot.

    relpath: The relative path to the directory from the build root.
    """
        safe_mkdir(os.path.join(cls.build_root, relpath))
Example #49
 def _setup_interpreter(self, interpreter):
   interpreter_dir = os.path.join(self._path, str(interpreter.identity))
   safe_mkdir(interpreter_dir)
   _safe_link(interpreter.binary, os.path.join(interpreter_dir, 'python'))
   return _resolve(self._config, interpreter, logger=self._logger)
Example #50
File: process.py  Project: zmyer/aurora
    def __init__(self,
                 name,
                 cmdline,
                 sequence,
                 pathspec,
                 sandbox_dir,
                 user=None,
                 platform=None,
                 logger_destination=LoggerDestination.FILE,
                 logger_mode=LoggerMode.STANDARD,
                 rotate_log_size=None,
                 rotate_log_backups=None):
        """
      required:
        name        = name of the process
        cmdline     = cmdline of the process
        sequence    = the next available sequence number for state updates
        pathspec    = TaskPath object for synthesizing path names
        sandbox_dir = the sandbox in which to run the process
        platform    = Platform providing fork, clock, getpid

      optional:
        user               = the user to run as (if unspecified, will default to current user.)
                             if specified to a user that is not the current user, you must have root
                             access
        logger_destination = The destination for logs output.
        logger_mode        = The type of logger to use for the process.
        rotate_log_size    = The maximum size of the rotated stdout/stderr logs.
        rotate_log_backups = The maximum number of rotated stdout/stderr log backups.
    """
        self._name = name
        self._cmdline = cmdline
        self._pathspec = pathspec
        self._seq = sequence
        self._sandbox = sandbox_dir
        if self._sandbox:
            safe_mkdir(self._sandbox)
        self._pid = None
        self._fork_time = None
        self._user = user
        self._ckpt = None
        self._ckpt_head = -1
        if platform is None:
            raise ValueError("Platform must be specified")
        self._platform = platform
        self._logger_destination = logger_destination
        self._logger_mode = logger_mode
        self._rotate_log_size = rotate_log_size
        self._rotate_log_backups = rotate_log_backups

        if not LoggerDestination.is_valid(self._logger_destination):
            raise ValueError("Logger destination %s is invalid." %
                             self._logger_destination)

        if not LoggerMode.is_valid(self._logger_mode):
            raise ValueError("Logger mode %s is invalid." % self._logger_mode)

        if self._logger_mode == LoggerMode.ROTATE:
            if self._rotate_log_size.as_(Data.BYTES) <= 0:
                raise ValueError('Log size cannot be less than one byte.')
            if self._rotate_log_backups <= 0:
                raise ValueError('Log backups cannot be less than one.')
Example #51
    def run(self, lock):
        if self.options.dry_run:
            print('****** Dry Run ******')

        logger = None
        if self.options.log or self.options.log_level:
            from twitter.common.log import init
            from twitter.common.log.options import LogOptions
            LogOptions.set_stderr_log_level((self.options.log_level
                                             or 'info').upper())
            logdir = self.options.logdir or self.config.get(
                'goals', 'logdir', default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)
                init('goals')
            else:
                init()
            logger = log

        if self.options.recursive_directory:
            log.warn(
                '--all-recursive is deprecated, use a target spec with the form [dir]:: instead'
            )
            for dir in self.options.recursive_directory:
                self.add_target_recursive(dir)

        if self.options.target_directory:
            log.warn(
                '--all is deprecated, use a target spec with the form [dir]: instead'
            )
            for dir in self.options.target_directory:
                self.add_target_directory(dir)

        context = Context(self.config,
                          self.options,
                          self.targets,
                          requested_goals=self.requested_goals,
                          lock=lock,
                          log=logger,
                          timer=self.timer if self.options.time else None)

        unknown = []
        for phase in self.phases:
            if not phase.goals():
                unknown.append(phase)

        if unknown:
            print('Unknown goal(s): %s' % ' '.join(phase.name
                                                   for phase in unknown))
            print('')
            return Phase.execute(context, 'goals')

        if logger:
            logger.debug('Operating on targets: %s', self.targets)

        ret = Phase.attempt(context, self.phases)

        if self.options.cleanup_nailguns or self.config.get(
                'nailgun', 'autokill', default=False):
            if log:
                log.debug('auto-killing nailguns')
            if NailgunTask.killall:
                NailgunTask.killall(log)

        if self.options.time:
            print('Timing report')
            print('=============')
            self.timer.print_timings()

        return ret
Example #52
 def __init__(self, config, logger=None):
   self._path = self._cache_dir(config)
   self._config = config
   safe_mkdir(self._path)
   self._interpreters = set()
   self._logger = logger or (lambda msg: True)
Example #53
    def bundle(self, app):
        bundledir = os.path.join(self.outdir, '%s-bundle' % app.basename)
        self.context.log.info('creating %s' %
                              os.path.relpath(bundledir, get_buildroot()))

        safe_mkdir(bundledir, clean=True)

        classpath = OrderedSet()
        if not self.deployjar:
            libdir = os.path.join(bundledir, 'libs')
            os.mkdir(libdir)

            for basedir, externaljar in self.list_jar_dependencies(app.binary):
                src = os.path.join(basedir, externaljar)
                link_name = os.path.join(libdir, externaljar)
                try:
                    os.symlink(src, link_name)
                except OSError as e:
                    if e.errno == errno.EEXIST:
                        raise TaskError(
                            'Trying to symlink %s to %s, but it is already symlinked to %s. '
                            % (link_name, src, os.readlink(link_name)) +
                            'Does the bundled target depend on multiple jvm_binary targets?'
                        )
                    else:
                        raise
                classpath.add(externaljar)

        for basedir, jars in self.context.products.get('jars').get(
                app.binary).items():
            if len(jars) != 1:
                raise TaskError('Expected 1 mapped binary but found: %s' %
                                jars)

            binary = jars.pop()
            binary_jar = os.path.join(basedir, binary)
            bundle_jar = os.path.join(bundledir, binary)
            if not classpath:
                os.symlink(binary_jar, bundle_jar)
            else:
                with open_zip(binary_jar, 'r') as src:
                    with open_zip(bundle_jar, 'w',
                                  compression=ZIP_DEFLATED) as dest:
                        for item in src.infolist():
                            buffer = src.read(item.filename)
                            if Manifest.PATH == item.filename:
                                manifest = Manifest(buffer)
                                manifest.addentry(
                                    Manifest.CLASS_PATH, ' '.join(
                                        os.path.join('libs', jar)
                                        for jar in classpath))
                                buffer = manifest.contents()
                            dest.writestr(item, buffer)

        for bundle in app.bundles:
            for path, relpath in bundle.filemap.items():
                bundlepath = os.path.join(bundledir, relpath)
                safe_mkdir(os.path.dirname(bundlepath))
                os.symlink(path, bundlepath)

        return bundledir
Example #54
File: goal.py  Project: wfarner/commons
 def write_pidfile():
   safe_mkdir(os.path.dirname(pidfile))
   with open(pidfile, 'w') as outfile:
     outfile.write(str(os.getpid()))
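
A companion read side for the pidfile written above; `read_pidfile` is a hypothetical helper shown only to illustrate the round trip:

def read_pidfile(pidfile):
    # Returns the recorded pid, or None if the file is missing or malformed.
    try:
        with open(pidfile) as infile:
            return int(infile.read().strip())
    except (IOError, ValueError):
        return None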
Example #55
    def _split_classes_dir(self, state, diff):
        """Split the merged classes dir into one dir per underlying artifact."""
        if len(self.underlying_artifacts) <= 1:
            return

        def map_classes_by_package(classes):
            # E.g., com/foo/bar/Bar.scala, com/foo/bar/Baz.scala to com/foo/bar -> [Bar.scala, Baz.scala].
            ret = defaultdict(list)
            for cls in classes:
                ret[os.path.dirname(cls)].append(os.path.basename(cls))
            return ret

        self.log.debug('Splitting classes dir %s' % self.classes_dir)
        if diff:
            new_or_changed_classnames_by_package = map_classes_by_package(
                diff.new_or_changed_classes)
            deleted_classnames_by_package = map_classes_by_package(
                diff.deleted_classes)
        else:
            new_or_changed_classnames_by_package = None
            deleted_classnames_by_package = None

        symlinkable_packages = self._symlinkable_packages(state)
        for artifact in self.underlying_artifacts:
            classnames_by_package = \
              map_classes_by_package(state.classes_by_target.get(artifact.targets[0], []))

            for package, classnames in classnames_by_package.items():
                if package == "":
                    raise TaskError("Found class files %s with empty package" %
                                    classnames)
                artifact_package_dir = os.path.join(artifact.classes_dir,
                                                    package)
                merged_package_dir = os.path.join(self.classes_dir, package)

                if package in symlinkable_packages:
                    if os.path.islink(merged_package_dir):
                        current_link = os.readlink(merged_package_dir)
                        if current_link != artifact_package_dir:
                            # The code moved to a different target.
                            os.unlink(merged_package_dir)
                            safe_rmtree(artifact_package_dir)
                            shutil.move(current_link, artifact_package_dir)
                            os.symlink(artifact_package_dir,
                                       merged_package_dir)
                    else:
                        safe_rmtree(artifact_package_dir)
                        shutil.move(merged_package_dir, artifact_package_dir)
                        os.symlink(artifact_package_dir, merged_package_dir)
                else:
                    safe_mkdir(artifact_package_dir)
                    new_or_changed_classnames = \
                      set(new_or_changed_classnames_by_package.get(package, [])) if diff else None
                    for classname in classnames:
                        if not diff or classname in new_or_changed_classnames:
                            src = os.path.join(merged_package_dir, classname)
                            dst = os.path.join(artifact_package_dir, classname)
                            self._maybe_hardlink(src, dst)
                    if diff:
                        for classname in deleted_classnames_by_package.get(
                                package, []):
                            path = os.path.join(artifact_package_dir,
                                                classname)
                            if os.path.exists(path):
                                os.unlink(path)
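
The grouping helper above is easy to sanity-check in isolation. A standalone version with hypothetical class files; note how a file with no directory lands under the empty package that the snippet rejects with a TaskError:

import os
from collections import defaultdict

def map_classes_by_package(classes):
    # com/foo/Bar.class, com/foo/Baz.class -> {'com/foo': ['Bar.class', 'Baz.class']}
    ret = defaultdict(list)
    for cls in classes:
        ret[os.path.dirname(cls)].append(os.path.basename(cls))
    return ret

mapped = map_classes_by_package(['com/foo/Bar.class', 'com/foo/Baz.class', 'Top.class'])
assert mapped == {'com/foo': ['Bar.class', 'Baz.class'], '': ['Top.class']}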
Example #56
def create_jvmdoc(command, gendir):
  safe_mkdir(gendir, clean=True)
  process = subprocess.Popen(command)
  result = process.wait()
  return result, gendir
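
A hypothetical invocation of the helper above; the tool, flags, and paths are illustrative only:

result, gendir = create_jvmdoc(
    ['javadoc', '-d', '/tmp/apidocs', '-sourcepath', 'src/java', 'com.example'],
    '/tmp/apidocs')
if result != 0:
    raise RuntimeError('jvmdoc generation exited with %d' % result)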
Example #57
    def generate_project(self, project):
        def is_test(source_set):
            # Non test targets that otherwise live in test target roots (say a java_library), must
            # be marked as test for IDEA to correctly link the targets with the test code that uses
            # them. Therefore we check the base instead of the is_test flag.
            return source_set.source_base in SourceSet.TEST_BASES

        def create_content_root(source_set):
            root_relative_path = os.path.join(source_set.source_base, source_set.path) \
                                 if source_set.path else source_set.source_base

            sources = TemplateData(path=root_relative_path,
                                   package_prefix=source_set.path.replace(
                                       '/', '.') if source_set.path else None,
                                   is_test=is_test(source_set))

            return TemplateData(
                path=root_relative_path,
                sources=[sources],
                exclude_paths=[
                    os.path.join(source_set.source_base, x)
                    for x in source_set.excludes
                ],
            )

        content_roots = [
            create_content_root(source_set) for source_set in project.sources
        ]
        if project.has_python:
            content_roots.extend(
                create_content_root(source_set)
                for source_set in project.py_sources)

        scala = None
        if project.has_scala:
            scala = TemplateData(
                language_level=self.scala_language_level,
                maximum_heap_size=self.scala_maximum_heap_size,
                fsc=self.fsc,
                compiler_classpath=project.scala_compiler_classpath)

        configured_module = TemplateData(
            root_dir=get_buildroot(),
            path=self.module_filename,
            content_roots=content_roots,
            bash=self.bash,
            python=project.has_python,
            scala=scala,
            internal_jars=[cp_entry.jar for cp_entry in project.internal_jars],
            internal_source_jars=[
                cp_entry.source_jar for cp_entry in project.internal_jars
                if cp_entry.source_jar
            ],
            external_jars=[cp_entry.jar for cp_entry in project.external_jars],
            external_javadoc_jars=[
                cp_entry.javadoc_jar for cp_entry in project.external_jars
                if cp_entry.javadoc_jar
            ],
            external_source_jars=[
                cp_entry.source_jar for cp_entry in project.external_jars
                if cp_entry.source_jar
            ],
            extra_components=[],
        )

        outdir = os.path.abspath(self.intellij_output_dir)
        if not os.path.exists(outdir):
            os.makedirs(outdir)

        configured_project = TemplateData(
            root_dir=get_buildroot(),
            outdir=outdir,
            modules=[configured_module],
            java=TemplateData(encoding=self.java_encoding,
                              maximum_heap_size=self.java_maximum_heap_size,
                              jdk=self.java_jdk,
                              language_level='JDK_1_%d' %
                              self.java_language_level),
            resource_extensions=list(project.resource_extensions),
            scala=scala,
            checkstyle_suppression_files=','.join(
                project.checkstyle_suppression_files),
            checkstyle_classpath=';'.join(project.checkstyle_classpath),
            debug_port=project.debug_port,
            extra_components=[],
        )

        existing_project_components = None
        existing_module_components = None
        if not self.nomerge:
            # Grab the existing components, which may include customized ones.
            existing_project_components = self._parse_xml_component_elements(
                self.project_filename)
            existing_module_components = self._parse_xml_component_elements(
                self.module_filename)

        # Generate (without merging in any extra components).
        safe_mkdir(os.path.abspath(self.intellij_output_dir))

        ipr = self._generate_to_tempfile(
            Generator(pkgutil.get_data(__name__, self.project_template),
                      project=configured_project))
        iml = self._generate_to_tempfile(
            Generator(pkgutil.get_data(__name__, self.module_template),
                      module=configured_module))

        if not self.nomerge:
            # Get the names of the components we generated, and then delete the
            # generated files.  Clunky, but performance is not an issue, and this
            # is an easy way to get those component names from the templates.
            extra_project_components = self._get_components_to_merge(
                existing_project_components, ipr)
            extra_module_components = self._get_components_to_merge(
                existing_module_components, iml)
            os.remove(ipr)
            os.remove(iml)

            # Generate again, with the extra components.
            ipr = self._generate_to_tempfile(
                Generator(pkgutil.get_data(__name__, self.project_template),
                          project=configured_project.extend(
                              extra_components=extra_project_components)))
            iml = self._generate_to_tempfile(
                Generator(pkgutil.get_data(__name__, self.module_template),
                          module=configured_module.extend(
                              extra_components=extra_module_components)))

        shutil.move(ipr, self.project_filename)
        shutil.move(iml, self.module_filename)

        print('\nGenerated project at %s%s' %
              (self.gen_project_workdir, os.sep))

        return self.project_filename if self.open else None
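
The merge dance above renders to temp files twice so existing component names can be diffed before the final move. A plausible sketch of the `_generate_to_tempfile` helper it relies on (the real implementation is not shown on this page), written as a free function:

import os
import tempfile

def generate_to_tempfile(generator):
    # Render a template to a throwaway path; the caller either inspects it for
    # component names (then deletes it) or shutil.move()s it over the real file.
    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as output:
        generator.write(output)
    return path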
Example #58
 def __init__(self, root):
     self._root = root
     safe_mkdir(self._root)
Example #59
 def __init__(self, root):
     self._root = os.path.join(root, str(BuildInvalidator.VERSION))
     safe_mkdir(self._root)
Example #60
    def run(self, lock):
        timer = None
        if self.options.time:

            class Timer(object):
                def now(self):
                    return time.time()

                def log(self, message):
                    print(message)

            timer = Timer()

        logger = None
        if self.options.log or self.options.log_level:
            from twitter.common.log import init
            from twitter.common.log.options import LogOptions
            LogOptions.set_stderr_log_level((self.options.log_level
                                             or 'info').upper())
            logdir = self.options.logdir or self.config.get(
                'goals', 'logdir', default=None)
            if logdir:
                safe_mkdir(logdir)
                LogOptions.set_log_dir(logdir)
                init('goals')
            else:
                init()
            logger = log

        if self.options.recursive_directory:
            log.warn(
                '--all-recursive is deprecated, use a target spec with the form [dir]:: instead'
            )
            for dir in self.options.recursive_directory:
                self.add_target_recursive(dir)

        if self.options.target_directory:
            log.warn(
                '--all is deprecated, use a target spec with the form [dir]: instead'
            )
            for dir in self.options.target_directory:
                self.add_target_directory(dir)

        context = Context(self.config,
                          self.options,
                          self.targets,
                          lock=lock,
                          log=logger)

        unknown = []
        for phase in self.phases:
            if not phase.goals():
                unknown.append(phase)

        if unknown:
            print('Unknown goal(s): %s' % ' '.join(phase.name
                                                   for phase in unknown))
            print('')
            return Phase.execute(context, 'goals')

        if logger:
            logger.debug('Operating on targets: %s', self.targets)

        return Phase.attempt(context, self.phases, timer=timer)