Example 1
0
    def _check_invalid_tags(self, symbol_map):
        """Check for commits on any symbols that are to be converted as tags.

        SYMBOL_MAP is a map {AbstractSymbol : (Trunk|TypedSymbol)}
        indicating how each AbstractSymbol is to be converted.  If there
        is a commit on a symbol, then it cannot be converted as a tag.  If
        any tags with commits are found, output error messages describing
        the problems then raise a FatalException."""

        logger.quiet("Checking for forced tags with commits...")

        # Collect the symbols that are slated to become tags but have
        # commits on them; those cannot be converted as tags.
        invalid_tags = []
        # Use values() rather than the Python 2-only itervalues() so the
        # code also works under Python 3; the extra list under Python 2
        # is negligible here.
        for symbol in symbol_map.values():
            if isinstance(symbol, Tag):
                stats = self.get_stats(symbol)
                if stats.branch_commit_count > 0:
                    invalid_tags.append(symbol)

        if not invalid_tags:
            # No problems found:
            return

        # Report all offending symbols in one multi-line error message:
        s = []
        s.append('%s: The following branches cannot be forced to be tags '
                 'because they have commits:\n' % (error_prefix,))
        for tag in invalid_tags:
            # Explicit one-tuple: '%s' % x misbehaves if x is a tuple.
            s.append('    %s\n' % (tag.name,))
        s.append('\n')
        logger.error(''.join(s))

        raise FatalException()
Example 2
0
  def _check_invalid_tags(self, symbol_map):
    """Check for commits on any symbols that are to be converted as tags.

    SYMBOL_MAP is a map {AbstractSymbol : (Trunk|TypedSymbol)}
    indicating how each AbstractSymbol is to be converted.  If there
    is a commit on a symbol, then it cannot be converted as a tag.  If
    any tags with commits are found, output error messages describing
    the problems then raise a FatalException."""

    logger.quiet("Checking for forced tags with commits...")

    # A symbol that should become a tag is invalid if it carries branch
    # commits.  values() (not the Python 2-only itervalues()) keeps this
    # working under both Python 2 and Python 3.
    invalid_tags = []
    for symbol in symbol_map.values():
      if isinstance(symbol, Tag):
        stats = self.get_stats(symbol)
        if stats.branch_commit_count > 0:
          invalid_tags.append(symbol)

    if not invalid_tags:
      # No problems found:
      return

    # Build one combined error message listing every offender:
    s = []
    s.append(
        '%s: The following branches cannot be forced to be tags '
        'because they have commits:\n'
        % (error_prefix,)
        )
    for tag in invalid_tags:
      # Explicit one-tuple so a tuple-valued name cannot break the format:
      s.append('    %s\n' % (tag.name,))
    s.append('\n')
    logger.error(''.join(s))

    raise FatalException()
Example 3
0
def main(progname, run_options, pass_manager):
  """Set up the conversion environment.

  PROGNAME is the name under which the program was invoked; it is used
  as the prefix if a temporary directory has to be auto-created.
  RUN_OPTIONS and PASS_MANAGER are not used in this excerpt --
  presumably consumed by later (unshown) parts of the conversion; TODO
  confirm against the full file.  Raises FatalError if the temporary
  directory is unusable or locked by another cvs2svn process."""

  # Disable garbage collection, as we do not not create any circular
  # data structures.  To verify this assumption, the function
  # check_for_garbage() in pass_manager.py is run after every pass.
  # It verifies that no unreachable objects have been created (or
  # reports any that were found):
  try:
    gc.disable()
  except (AttributeError, NotImplementedError):
    # Other Python implementations implement garbage collection
    # differently, so if an error occurs just ignore it.
    pass

  # Convenience var, so we don't have to keep instantiating this Borg.
  ctx = Ctx()

  # Make sure the tmp directory exists.  Note that we don't check if
  # it's empty -- we want to be able to use, for example, "." to hold
  # tempfiles.
  # erase_tmpdir records whether we created the directory ourselves;
  # NOTE(review): presumably consulted later to decide whether to delete
  # the tmpdir -- not shown in this excerpt.
  if ctx.tmpdir is None:
    # No --tmpdir given: create a fresh, uniquely-named directory.
    ctx.tmpdir = tempfile.mkdtemp(prefix=('%s-' % (progname,)))
    erase_tmpdir = True
    logger.quiet(
      'Writing temporary files to %r\n'
      'Be sure to use --tmpdir=%r if you need to resume this conversion.'
      % (ctx.tmpdir, ctx.tmpdir,),
      )
  elif not os.path.exists(ctx.tmpdir):
    # The requested directory doesn't exist yet; we create it, so we
    # also take responsibility for erasing it.
    os.mkdir(ctx.tmpdir)
    erase_tmpdir = True
  elif not os.path.isdir(ctx.tmpdir):
    raise FatalError(
        "cvs2svn tried to use '%s' for temporary files, but that path\n"
        "  exists and is not a directory.  Please make it be a directory,\n"
        "  or specify some other directory for temporary files."
        % (ctx.tmpdir,))
  else:
    # The directory already existed; leave it in place afterwards.
    erase_tmpdir = False

  # But do lock the tmpdir, to avoid process clash.
  try:
    os.mkdir(os.path.join(ctx.tmpdir, 'cvs2svn.lock'))
  except OSError, e:
    if e.errno == errno.EACCES:
      raise FatalError("Permission denied:"
                       + " No write access to directory '%s'." % ctx.tmpdir)
    if e.errno == errno.EEXIST:
      # The lock subdirectory already exists: another (or a stale)
      # cvs2svn run owns this tmpdir.
      raise FatalError(
          "cvs2svn is using directory '%s' for temporary files, but\n"
          "  subdirectory '%s/cvs2svn.lock' exists, indicating that another\n"
          "  cvs2svn process is currently using '%s' as its temporary\n"
          "  workspace.  If you are certain that is not the case,\n"
          "  then remove the '%s/cvs2svn.lock' subdirectory."
          % (ctx.tmpdir, ctx.tmpdir, ctx.tmpdir, ctx.tmpdir,))
    # Any other OS error is unexpected; propagate it.
    raise
Example 4
0
def main(progname, run_options, pass_manager):
    """Set up the conversion environment.

    PROGNAME is the name under which the program was invoked; it is
    used as the prefix if a temporary directory has to be auto-created.
    RUN_OPTIONS and PASS_MANAGER are not used in this excerpt --
    presumably consumed by later (unshown) parts of the conversion;
    TODO confirm against the full file.  Raises FatalError if the
    temporary directory is unusable or locked by another cvs2svn
    process."""

    # Convenience var, so we don't have to keep instantiating this Borg.
    ctx = Ctx()

    # Make sure the tmp directory exists.  Note that we don't check if
    # it's empty -- we want to be able to use, for example, "." to hold
    # tempfiles.
    # erase_tmpdir records whether we created the directory ourselves;
    # NOTE(review): presumably consulted later to decide whether to
    # delete the tmpdir -- not shown in this excerpt.
    if ctx.tmpdir is None:
        # No --tmpdir given: create a fresh, uniquely-named directory.
        ctx.tmpdir = tempfile.mkdtemp(prefix=('%s-' % (progname, )))
        erase_tmpdir = True
        logger.quiet(
            'Writing temporary files to %r\n'
            'Be sure to use --tmpdir=%r if you need to resume this conversion.'
            % (
                ctx.tmpdir,
                ctx.tmpdir,
            ), )
    elif not os.path.exists(ctx.tmpdir):
        # The requested directory doesn't exist yet; we create it, so we
        # also take responsibility for erasing it.
        os.mkdir(ctx.tmpdir)
        erase_tmpdir = True
    elif not os.path.isdir(ctx.tmpdir):
        raise FatalError(
            "cvs2svn tried to use '%s' for temporary files, but that path\n"
            "  exists and is not a directory.  Please make it be a directory,\n"
            "  or specify some other directory for temporary files." %
            (ctx.tmpdir, ))
    else:
        # The directory already existed; leave it in place afterwards.
        erase_tmpdir = False

    # But do lock the tmpdir, to avoid process clash.
    try:
        os.mkdir(os.path.join(ctx.tmpdir, 'cvs2svn.lock'))
    except OSError, e:
        if e.errno == errno.EACCES:
            raise FatalError("Permission denied:" +
                             " No write access to directory '%s'." %
                             ctx.tmpdir)
        if e.errno == errno.EEXIST:
            # The lock subdirectory already exists: another (or a stale)
            # cvs2svn run owns this tmpdir.
            raise FatalError(
                "cvs2svn is using directory '%s' for temporary files, but\n"
                "  subdirectory '%s/cvs2svn.lock' exists, indicating that another\n"
                "  cvs2svn process is currently using '%s' as its temporary\n"
                "  workspace.  If you are certain that is not the case,\n"
                "  then remove the '%s/cvs2svn.lock' subdirectory." % (
                    ctx.tmpdir,
                    ctx.tmpdir,
                    ctx.tmpdir,
                    ctx.tmpdir,
                ))
        # Any other OS error is unexpected; propagate it.
        raise
Example 5
0
def main(progname, run_options, pass_manager):
  """Set up the conversion environment.

  PROGNAME is the name under which the program was invoked; it is used
  as the prefix if a temporary directory has to be auto-created.
  RUN_OPTIONS and PASS_MANAGER are not used in this excerpt --
  presumably consumed by later (unshown) parts of the conversion; TODO
  confirm against the full file.  Raises FatalError if the temporary
  directory is unusable or locked by another cvs2svn process."""

  # Convenience var, so we don't have to keep instantiating this Borg.
  ctx = Ctx()

  # Make sure the tmp directory exists.  Note that we don't check if
  # it's empty -- we want to be able to use, for example, "." to hold
  # tempfiles.
  # erase_tmpdir records whether we created the directory ourselves;
  # NOTE(review): presumably consulted later to decide whether to delete
  # the tmpdir -- not shown in this excerpt.
  if ctx.tmpdir is None:
    # No --tmpdir given: create a fresh, uniquely-named directory.
    ctx.tmpdir = tempfile.mkdtemp(prefix=('%s-' % (progname,)))
    erase_tmpdir = True
    logger.quiet(
      'Writing temporary files to %r\n'
      'Be sure to use --tmpdir=%r if you need to resume this conversion.'
      % (ctx.tmpdir, ctx.tmpdir,),
      )
  elif not os.path.exists(ctx.tmpdir):
    # The requested directory doesn't exist yet; we create it, so we
    # also take responsibility for erasing it.
    os.mkdir(ctx.tmpdir)
    erase_tmpdir = True
  elif not os.path.isdir(ctx.tmpdir):
    raise FatalError(
        "cvs2svn tried to use '%s' for temporary files, but that path\n"
        "  exists and is not a directory.  Please make it be a directory,\n"
        "  or specify some other directory for temporary files."
        % (ctx.tmpdir,))
  else:
    # The directory already existed; leave it in place afterwards.
    erase_tmpdir = False

  # But do lock the tmpdir, to avoid process clash.
  try:
    os.mkdir(os.path.join(ctx.tmpdir, 'cvs2svn.lock'))
  except OSError, e:
    if e.errno == errno.EACCES:
      raise FatalError("Permission denied:"
                       + " No write access to directory '%s'." % ctx.tmpdir)
    if e.errno == errno.EEXIST:
      # The lock subdirectory already exists: another (or a stale)
      # cvs2svn run owns this tmpdir.
      raise FatalError(
          "cvs2svn is using directory '%s' for temporary files, but\n"
          "  subdirectory '%s/cvs2svn.lock' exists, indicating that another\n"
          "  cvs2svn process is currently using '%s' as its temporary\n"
          "  workspace.  If you are certain that is not the case,\n"
          "  then remove the '%s/cvs2svn.lock' subdirectory."
          % (ctx.tmpdir, ctx.tmpdir, ctx.tmpdir, ctx.tmpdir,))
    # Any other OS error is unexpected; propagate it.
    raise
Example 6
0
    def run(self, run_options, stats_keeper):
        """Verify that pred/succ links between CVS items mirror each other.

        Opens the project, path, and symbol databases, walks every CVS
        item, and collects a message for each one-way pred/succ link.
        Raises FatalException listing all inconsistencies if any are
        found; otherwise closes the databases quietly."""

        # Ctx is a Borg, so one instance is as good as many:
        ctx = Ctx()
        ctx._projects = read_projects(
            artifact_manager.get_temp_file(config.PROJECTS))
        ctx._cvs_path_db = CVSPathDatabase(DB_OPEN_READ)
        self.symbol_db = SymbolDatabase()
        ctx._symbol_db = self.symbol_db

        logger.quiet("Checking dependency consistency...")

        problems = []
        for item in self.iter_cvs_items():
            # Each predecessor must list this item among its successors:
            for pred_id in item.get_pred_ids():
                pred = self.get_cvs_item(pred_id)
                if item.id not in pred.get_succ_ids():
                    problems.append(
                        '%s lists pred=%s, but not vice versa.'
                        % (item, pred))

            # ...and each successor must list it among its predecessors:
            for succ_id in item.get_succ_ids():
                succ = self.get_cvs_item(succ_id)
                if item.id not in succ.get_pred_ids():
                    problems.append(
                        '%s lists succ=%s, but not vice versa.'
                        % (item, succ))

        if problems:
            raise FatalException('Dependencies inconsistent:\n'
                                 '%s\n'
                                 'Exited due to fatal error(s).' %
                                 ('\n'.join(problems),))

        self.symbol_db.close()
        self.symbol_db = None
        ctx._cvs_path_db.close()
        logger.quiet("Done")
  def run(self, run_options, stats_keeper):
    """Check that every pred/succ link between CVS items is mutual.

    Loads the project, CVS-path, and symbol databases, then scans all
    CVS items recording every one-sided link.  If any were found, a
    FatalException carrying the full list is raised; otherwise the
    databases are closed and a quiet 'Done' is logged."""

    Ctx()._projects = read_projects(
        artifact_manager.get_temp_file(config.PROJECTS)
        )
    Ctx()._cvs_path_db = CVSPathDatabase(DB_OPEN_READ)
    self.symbol_db = SymbolDatabase()
    Ctx()._symbol_db = self.symbol_db

    logger.quiet("Checking dependency consistency...")

    errors = []
    for cvs_item in self.iter_cvs_items():
      # Predecessor links must be mirrored by successor links...
      for other_id in cvs_item.get_pred_ids():
        other = self.get_cvs_item(other_id)
        if cvs_item.id not in other.get_succ_ids():
          errors.append(
              '%s lists pred=%s, but not vice versa.' % (cvs_item, other,))

      # ...and successor links must be mirrored by predecessor links.
      for other_id in cvs_item.get_succ_ids():
        other = self.get_cvs_item(other_id)
        if cvs_item.id not in other.get_pred_ids():
          errors.append(
              '%s lists succ=%s, but not vice versa.' % (cvs_item, other,))

    if errors:
      raise FatalException(
          'Dependencies inconsistent:\n%s\nExited due to fatal error(s).'
          % ('\n'.join(errors),)
          )

    self.symbol_db.close()
    self.symbol_db = None
    Ctx()._cvs_path_db.close()
    logger.quiet("Done")
Example 8
0
    def run(self, run_options):
        """Run the specified passes, one after another.

        RUN_OPTIONS will be passed to the Passes' run() methods.
        RUN_OPTIONS.start_pass is the number of the first pass that should
        be run.  RUN_OPTIONS.end_pass is the number of the last pass that
        should be run.  It must be that 1 <= RUN_OPTIONS.start_pass <=
        RUN_OPTIONS.end_pass <= self.num_passes."""

        # Convert start_pass and end_pass into the indices of the passes
        # to execute, using the Python index range convention (i.e., first
        # pass executed and first pass *after* the ones that should be
        # executed).
        index_start = run_options.start_pass - 1
        index_end = run_options.end_pass

        # Inform the artifact manager when artifacts are created and used:
        for (i, the_pass) in enumerate(self.passes):
            the_pass.register_artifacts()
            # Each pass creates a new version of the statistics file
            # (pass numbers, and hence statistics-file names, are 1-based):
            artifact_manager.register_temp_file(
                config.STATISTICS_FILE % (i + 1, ), the_pass)
            if i != 0:
                # Each pass subsequent to the first reads the statistics file
                # from the preceding pass.  ('i + 1 - 1' is the previous
                # pass's 1-based number, written this way for symmetry with
                # the expression above.)
                artifact_manager.register_temp_file_needed(
                    config.STATISTICS_FILE % (i + 1 - 1, ), the_pass)

        # Tell the artifact manager about passes that are being skipped this run:
        for the_pass in self.passes[0:index_start]:
            artifact_manager.pass_skipped(the_pass)

        start_time = time.time()
        for i in range(index_start, index_end):
            the_pass = self.passes[i]
            logger.quiet('----- pass %d (%s) -----' % (
                i + 1,
                the_pass.name,
            ))
            artifact_manager.pass_started(the_pass)

            if i == 0:
                # The very first pass starts with fresh statistics:
                stats_keeper = StatsKeeper()
            else:
                # Later passes resume from the statistics archived by the
                # preceding pass:
                stats_keeper = read_stats_keeper(
                    artifact_manager.get_temp_file(config.STATISTICS_FILE %
                                                   (i + 1 - 1, )))

            the_pass.run(run_options, stats_keeper)
            end_time = time.time()
            stats_keeper.log_duration_for_pass(end_time - start_time, i + 1,
                                               the_pass.name)
            logger.normal(stats_keeper.single_pass_timing(i + 1))
            # Archive the updated statistics for the next pass (or a
            # resumed run):
            stats_keeper.archive(
                artifact_manager.get_temp_file(config.STATISTICS_FILE %
                                               (i + 1, )))
            start_time = end_time
            Ctx().clean()
            # Allow the artifact manager to clean up artifacts that are no
            # longer needed:
            artifact_manager.pass_done(the_pass, Ctx().skip_cleanup)

            self.garbage_collection_policy.check_for_garbage()

        # Tell the artifact manager about passes that are being deferred:
        for the_pass in self.passes[index_end:]:
            artifact_manager.pass_deferred(the_pass)

        # stats_keeper here is the one from the final executed pass.  The
        # documented precondition (start_pass <= end_pass) guarantees the
        # loop ran at least once, so the name is bound.
        logger.quiet(stats_keeper)
        logger.normal(stats_keeper.timings())

        # Consistency check:
        artifact_manager.check_clean()
Example 9
0
  def run(self, run_options):
    """Run the specified passes, one after another.

    RUN_OPTIONS will be passed to the Passes' run() methods.
    RUN_OPTIONS.start_pass is the number of the first pass that should
    be run.  RUN_OPTIONS.end_pass is the number of the last pass that
    should be run.  It must be that 1 <= RUN_OPTIONS.start_pass <=
    RUN_OPTIONS.end_pass <= self.num_passes."""

    # Convert start_pass and end_pass into the indices of the passes
    # to execute, using the Python index range convention (i.e., first
    # pass executed and first pass *after* the ones that should be
    # executed).
    index_start = run_options.start_pass - 1
    index_end = run_options.end_pass

    # Inform the artifact manager when artifacts are created and used:
    for (i, the_pass) in enumerate(self.passes):
      the_pass.register_artifacts()
      # Each pass creates a new version of the statistics file
      # (pass numbers, and hence statistics-file names, are 1-based):
      artifact_manager.register_temp_file(
          config.STATISTICS_FILE % (i + 1,), the_pass
          )
      if i != 0:
        # Each pass subsequent to the first reads the statistics file
        # from the preceding pass.  ('i + 1 - 1' is the previous pass's
        # 1-based number, kept for symmetry with the expression above.)
        artifact_manager.register_temp_file_needed(
            config.STATISTICS_FILE % (i + 1 - 1,), the_pass
            )

    # Tell the artifact manager about passes that are being skipped this run:
    for the_pass in self.passes[0:index_start]:
      artifact_manager.pass_skipped(the_pass)

    start_time = time.time()
    for i in range(index_start, index_end):
      the_pass = self.passes[i]
      logger.quiet('----- pass %d (%s) -----' % (i + 1, the_pass.name,))
      artifact_manager.pass_started(the_pass)

      if i == 0:
        # The very first pass starts with fresh statistics:
        stats_keeper = StatsKeeper()
      else:
        # Later passes resume from the statistics archived by the
        # preceding pass:
        stats_keeper = read_stats_keeper(
            artifact_manager.get_temp_file(
                config.STATISTICS_FILE % (i + 1 - 1,)
                )
            )

      the_pass.run(run_options, stats_keeper)
      end_time = time.time()
      stats_keeper.log_duration_for_pass(
          end_time - start_time, i + 1, the_pass.name
          )
      logger.normal(stats_keeper.single_pass_timing(i + 1))
      # Archive the updated statistics for the next pass (or a resumed
      # run):
      stats_keeper.archive(
          artifact_manager.get_temp_file(config.STATISTICS_FILE % (i + 1,))
          )
      start_time = end_time
      Ctx().clean()
      # Allow the artifact manager to clean up artifacts that are no
      # longer needed:
      artifact_manager.pass_done(the_pass, Ctx().skip_cleanup)

      check_for_garbage()

    # Tell the artifact manager about passes that are being deferred:
    for the_pass in self.passes[index_end:]:
      artifact_manager.pass_deferred(the_pass)

    # stats_keeper here is the one from the final executed pass.  The
    # documented precondition (start_pass <= end_pass) guarantees the
    # loop ran at least once, so the name is bound.
    logger.quiet(stats_keeper)
    logger.normal(stats_keeper.timings())

    # Consistency check:
    artifact_manager.check_clean()
Example 10
0
  def finish(self):
    """State that we are done creating our repository.

    Purely informational: emits a verbose-level completion message
    followed by a quiet-level 'Done.' marker; performs no other work."""

    for emit, message in (
        (logger.verbose, "Finished creating Subversion repository."),
        (logger.quiet, "Done."),
        ):
      emit(message)