コード例 #1
0
  def process_post_commit(self, svn_commit):
    """Emit a git-fast-import commit for a post-commit merged to trunk.

    All of SVN_COMMIT's CVS revisions must originate on a single line
    of development (LOD); otherwise InternalError is raised.  The
    commit is written to self.f with a 'merge' reference back to the
    source LOD's commit."""

    author = self._get_author(svn_commit)
    log_msg = self._get_log_msg(svn_commit)

    # Every CVS revision has to come from the same source LOD:
    source_lods = set(cvs_rev.lod for cvs_rev in svn_commit.cvs_revs)
    if len(source_lods) != 1:
      raise InternalError('Commit is from %d LODs' % (len(source_lods),))
    source_lod = source_lods.pop()

    self._mirror.start_commit(svn_commit.revnum)
    # FIXME: is this correct?:
    self.f.write('commit refs/heads/master\n')
    mark = self._create_commit_mark(None, svn_commit.revnum)
    logger.normal(
        'Writing post-commit r%d on Trunk (mark :%d)'
        % (svn_commit.revnum, mark,)
        )
    out = self.f.write
    out('mark :%d\n' % (mark,))
    out('committer %s %d +0000\n' % (author, svn_commit.date,))
    out('data %d\n' % (len(log_msg),))
    out('%s\n' % (log_msg,))
    # Record the merge from the source LOD's commit:
    out(
        'merge :%d\n'
        % (self._get_source_mark(source_lod, svn_commit.revnum),)
        )
    for cvs_rev in svn_commit.cvs_revs:
      self.revision_writer.process_revision(cvs_rev, post_commit=True)

    out('\n')
    self._mirror.end_commit()
コード例 #2
0
ファイル: git_output_option.py プロジェクト: mhagger/cvs2svn
  def process_primary_commit(self, svn_commit):
    """Emit SVN_COMMIT to self.f as a git-fast-import commit command.

    The CVS items in SVN_COMMIT must all lie on one line of
    development (LOD); otherwise InternalError is raised.  Trunk
    commits go to refs/heads/master, branch commits to the branch's
    own head ref."""

    author = self._get_author(svn_commit)
    log_msg = self._get_log_msg(svn_commit)

    # Every CVS item must belong to the same LOD:
    lods = set(cvs_item.lod for cvs_item in svn_commit.get_cvs_items())
    if len(lods) != 1:
      raise InternalError('Commit affects %d LODs' % (len(lods),))
    lod = lods.pop()

    self._mirror.start_commit(svn_commit.revnum)
    if not isinstance(lod, Trunk):
      self.f.write('commit refs/heads/%s\n' % (lod.name,))
    else:
      # FIXME: is this correct?:
      self.f.write('commit refs/heads/master\n')
    mark = self._create_commit_mark(lod, svn_commit.revnum)
    logger.normal(
        'Writing commit r%d on %s (mark :%d)'
        % (svn_commit.revnum, lod, mark,)
        )
    out = self.f.write
    out('mark :%d\n' % (mark,))
    out('committer %s %d +0000\n' % (author, svn_commit.date,))
    out('data %d\n' % (len(log_msg),))
    out('%s\n' % (log_msg,))
    for cvs_item in svn_commit.get_cvs_items():
      self.revision_writer.process_revision(cvs_item, post_commit=False)

    out('\n')
    self._mirror.end_commit()
コード例 #3
0
  def process_primary_commit(self, svn_commit):
    """Write SVN_COMMIT to the git-fast-import stream as a commit.

    All of the CVS items in SVN_COMMIT must belong to a single line of
    development (LOD); otherwise an InternalError is raised.  Trunk
    commits are written to refs/heads/master, branch commits to
    refs/heads/<lod.name>."""

    author = self._get_author(svn_commit)
    log_msg = self._get_log_msg(svn_commit)

    # Determine the (unique) LOD affected by this commit:
    lods = set()
    for cvs_rev in svn_commit.get_cvs_items():
      lods.add(cvs_rev.lod)
    if len(lods) != 1:
      raise InternalError('Commit affects %d LODs' % (len(lods),))
    lod = lods.pop()

    self._mirror.start_commit(svn_commit.revnum)
    if isinstance(lod, Trunk):
      # FIXME: is this correct?:
      self.f.write('commit refs/heads/master\n')
    else:
      self.f.write('commit refs/heads/%s\n' % (lod.name,))
    # Allocate a mark so later commits can refer back to this one:
    mark = self._create_commit_mark(lod, svn_commit.revnum)
    logger.normal(
        'Writing commit r%d on %s (mark :%d)'
        % (svn_commit.revnum, lod, mark,)
        )
    self.f.write('mark :%d\n' % (mark,))
    self.f.write(
        'committer %s %d +0000\n' % (author, svn_commit.date,)
        )
    # git-fast-import 'data' command: byte count, then the payload:
    self.f.write('data %d\n' % (len(log_msg),))
    self.f.write('%s\n' % (log_msg,))
    # Emit the file modifications that make up this commit:
    for cvs_rev in svn_commit.get_cvs_items():
      self.revision_writer.process_revision(cvs_rev, post_commit=False)

    self.f.write('\n')
    self._mirror.end_commit()
コード例 #4
0
 def start(self):
     """Spawn the generate_blobs.py helper as a child process.

     The helper reads instructions from its stdin (held open in
     self._popen) and writes blob data to self.blob_filename."""
     self._mark_generator = KeyGenerator()
     logger.normal("Starting generate_blobs.py...")
     script = os.path.join(os.path.dirname(__file__), "generate_blobs.py")
     self._popen = subprocess.Popen(
         [sys.executable, script, self.blob_filename],
         stdin=subprocess.PIPE,
     )
コード例 #5
0
ファイル: git_output_option.py プロジェクト: mhagger/cvs2svn
  def process_post_commit(self, svn_commit):
    """Write a post-commit (a commit merged back to trunk) to self.f.

    All of the CVS revisions in SVN_COMMIT must come from a single
    source line of development (LOD); otherwise an InternalError is
    raised.  The resulting git-fast-import commit records a 'merge'
    from the source LOD's commit."""

    author = self._get_author(svn_commit)
    log_msg = self._get_log_msg(svn_commit)

    # Find the (unique) LOD that the CVS revisions come from:
    source_lods = set()
    for cvs_rev in svn_commit.cvs_revs:
      source_lods.add(cvs_rev.lod)
    if len(source_lods) != 1:
      raise InternalError('Commit is from %d LODs' % (len(source_lods),))
    source_lod = source_lods.pop()

    self._mirror.start_commit(svn_commit.revnum)
    # FIXME: is this correct?:
    self.f.write('commit refs/heads/master\n')
    mark = self._create_commit_mark(None, svn_commit.revnum)
    logger.normal(
        'Writing post-commit r%d on Trunk (mark :%d)'
        % (svn_commit.revnum, mark,)
        )
    self.f.write('mark :%d\n' % (mark,))
    self.f.write(
        'committer %s %d +0000\n' % (author, svn_commit.date,)
        )
    # git-fast-import 'data' command: byte count, then the log message:
    self.f.write('data %d\n' % (len(log_msg),))
    self.f.write('%s\n' % (log_msg,))
    # Record the merge from the source LOD's commit:
    self.f.write(
        'merge :%d\n'
        % (self._get_source_mark(source_lod, svn_commit.revnum),)
        )
    for cvs_rev in svn_commit.cvs_revs:
      self.revision_writer.process_revision(cvs_rev, post_commit=True)

    self.f.write('\n')
    self._mirror.end_commit()
コード例 #6
0
 def finish(self):
     """Close generate_blobs.py's stdin and wait for it to exit.

     Raises FatalError if the child exits with a nonzero status."""
     self._popen.stdin.close()
     logger.normal("Waiting for generate_blobs.py to finish...")
     returncode = self._popen.wait()
     if not returncode:
         logger.normal("generate_blobs.py is done.")
     else:
         raise FatalError("generate_blobs.py failed with return code %s." % (returncode,))
コード例 #7
0
 def finish(self):
     """Wait for the generate_blobs.py subprocess to terminate.

     Closing its stdin signals end-of-input; a nonzero exit status
     is converted into a FatalError."""
     self._popen.stdin.close()
     logger.normal('Waiting for generate_blobs.py to finish...')
     rc = self._popen.wait()
     if rc:
         raise FatalError(
             'generate_blobs.py failed with return code %s.' % (rc,)
         )
     logger.normal('generate_blobs.py is done.')
コード例 #8
0
 def finish(self):
   """Shut down the blob-generation subprocess and verify success.

   Raises FatalError if generate_blobs.py exits with a nonzero
   return code."""
   self._pipe.stdin.close()
   logger.normal('Waiting for generate_blobs.py to finish...')
   status = self._pipe.wait()
   if status:
     raise FatalError(
         'generate_blobs.py failed with return code %s.' % (status,)
         )
   logger.normal('generate_blobs.py is done.')
コード例 #9
0
ファイル: git_output_option.py プロジェクト: mhagger/cvs2svn
 def start(self, mirror, f):
   """Start writing revisions, first flushing buffered blob data into F.

   If the revision collector stored its blobs in a temporary file
   (its blob_filename is None), copy that file's contents into F
   before any commits are emitted."""
   GitRevisionWriter.start(self, mirror, f)
   if Ctx().revision_collector.blob_filename is None:
     # The revision collector wrote the blobs to a temporary file;
     # copy them into f.  Use a 'with' block so the file is closed
     # even if copyfileobj raises (the original leaked the handle
     # on error):
     logger.normal('Copying blob data to output')
     with open(
         artifact_manager.get_temp_file(config.GIT_BLOB_DATAFILE), 'rb',
         ) as blobf:
       shutil.copyfileobj(blobf, f)
コード例 #10
0
 def start(self, mirror, f):
   """Begin writing revisions; first flush buffered blob data into F.

   If the revision collector's blob_filename is None, its blobs were
   written to a temporary file, whose contents are copied into F here
   before any commits are emitted."""
   GitRevisionWriter.start(self, mirror, f)
   if Ctx().revision_collector.blob_filename is None:
     # The revision collector wrote the blobs to a temporary file;
     # copy them into f:
     logger.normal('Copying blob data to output')
     blobf = open(
         artifact_manager.get_temp_file(config.GIT_BLOB_DATAFILE), 'rb',
         )
     shutil.copyfileobj(blobf, f)
     blobf.close()
コード例 #11
0
 def start(self):
     """Begin blob generation by launching the generate_blobs.py helper.

     The helper is fed via a pipe attached to its stdin and writes
     blob data to self.blob_filename."""
     self._mark_generator = KeyGenerator()
     logger.normal('Starting generate_blobs.py...')
     argv = [
         sys.executable,
         os.path.join(os.path.dirname(__file__), 'generate_blobs.py'),
         self.blob_filename,
     ]
     self._popen = subprocess.Popen(argv, stdin=subprocess.PIPE)
コード例 #12
0
 def start(self):
   """Launch generate_blobs.py, writing blobs to the blob file.

   If no blob filename was configured (self.blob_filename is None),
   a temporary file obtained from the artifact manager is used
   instead."""
   self._mark_generator = KeyGenerator()
   logger.normal('Starting generate_blobs.py...')
   blob_filename = self.blob_filename
   if blob_filename is None:
     blob_filename = artifact_manager.get_temp_file(config.GIT_BLOB_DATAFILE)
   helper = os.path.join(os.path.dirname(__file__), 'generate_blobs.py')
   self._pipe = subprocess.Popen(
       [sys.executable, helper, blob_filename],
       stdin=subprocess.PIPE,
       )
コード例 #13
0
ファイル: repository_walker.py プロジェクト: mhagger/cvs2svn
  def _generate_attic_cvs_files(self, cvs_directory, exclude_paths):
    """Generate CVSFiles for the files in Attic directory CVS_DIRECTORY.

    Also yield CVS_DIRECTORY if any files are being retained in the
    Attic.

    Silently ignore subdirectories named '.svn' or 'CVS', but emit a
    warning if any other directories are found within the Attic
    directory."""

    # Files that will stay in the Attic directory itself:
    retained = []

    for fname in sorted(os.listdir(cvs_directory.rcs_path)):
      pathname = os.path.join(cvs_directory.rcs_path, fname)
      path_in_repository = path_join(cvs_directory.get_cvs_path(), fname)
      if path_in_repository in exclude_paths:
        logger.normal(
            "Excluding file from conversion: %s" % (path_in_repository,)
            )
        continue
      if os.path.isdir(pathname):
        if fname in ('.svn', 'CVS'):
          log = logger.debug
        else:
          log = logger.warn
        log("Directory %s found within Attic; ignoring" % (pathname,))
      elif fname.endswith(',v'):
        cvs_file = self._get_attic_file(cvs_directory, fname)
        if cvs_file.parent_directory == cvs_directory:
          # This file will be retained in the Attic directory.
          retained.append(cvs_file)
        else:
          # A normal Attic file; it is treated as if it were located
          # one directory up:
          yield cvs_file

    if retained:
      # At least one file is being kept in the Attic.  Emit the Attic
      # directory itself first, then the retained files:
      yield cvs_directory
      for cvs_file in retained:
        yield cvs_file
コード例 #14
0
  def _generate_attic_cvs_files(self, cvs_directory, exclude_paths):
    """Generate CVSFiles for the files in Attic directory CVS_DIRECTORY.

    Also yield CVS_DIRECTORY if any files are being retained in the
    Attic.

    Silently ignore subdirectories named '.svn' or 'CVS', but emit a
    warning if any other directories are found within the Attic
    directory."""

    # Files that will be kept in the Attic directory itself
    # (presumably because they conflict with non-Attic files --
    # determined by _get_attic_file; confirm against that helper):
    retained_attic_files = []

    fnames = os.listdir(cvs_directory.rcs_path)
    fnames.sort()
    for fname in fnames:
      pathname = os.path.join(cvs_directory.rcs_path, fname)
      path_in_repository = path_join(cvs_directory.get_cvs_path(), fname)
      if path_in_repository in exclude_paths:
        logger.normal(
            "Excluding file from conversion: %s" % (path_in_repository,)
            )
      elif os.path.isdir(pathname):
        if fname == '.svn' or fname == 'CVS':
          logger.debug(
              "Directory %s found within Attic; ignoring" % (pathname,)
              )
        else:
          logger.warn(
              "Directory %s found within Attic; ignoring" % (pathname,)
              )
      elif fname.endswith(',v'):
        cvs_file = self._get_attic_file(cvs_directory, fname)
        if cvs_file.parent_directory == cvs_directory:
          # This file will be retained in the Attic directory.
          retained_attic_files.append(cvs_file)
        else:
          # This is a normal Attic file, which is treated as if it
          # were located one directory up:
          yield cvs_file

    if retained_attic_files:
      # There was at least one file in the attic that will be retained
      # in the attic.  First include the Attic directory itself in the
      # output, then the retained attic files:
      yield cvs_directory
      for cvs_file in retained_attic_files:
        yield cvs_file
コード例 #15
0
ファイル: collect_data.py プロジェクト: termim/cvs2svn
  def process_file(self, cvs_file):
    """Parse the RCS file CVS_FILE and return its CVS file items.

    If the ,v file cannot be parsed, record a fatal error and return
    None so that the pass can continue with the remaining files."""

    logger.normal(cvs_file.filename)
    fdc = _FileDataCollector(self, cvs_file)
    try:
      cvs2svn_rcsparse.parse(open(cvs_file.filename, 'rb'), fdc)
    except (cvs2svn_rcsparse.common.RCSParseError, ValueError, RuntimeError):
      self.collect_data.record_fatal_error(
          "%r is not a valid ,v file" % (cvs_file.filename,)
          )
      # Skip this file, but let the pass continue with the others:
      return
    except:
      # Unexpected failure; note which file triggered it and re-raise:
      logger.warn("Exception occurred while parsing %s" % cvs_file.filename)
      raise

    # Parsing succeeded:
    self.num_files += 1
    return fdc.get_cvs_file_items()
コード例 #16
0
ファイル: collect_data.py プロジェクト: robinst/cvs2svn
 def process_file(self, cvs_file):
   """Parse the RCS file CVS_FILE using a _FileDataCollector.

   On a parse failure, record a fatal error and return None so the
   pass can continue with the remaining files."""
   logger.normal(cvs_file.rcs_path)
   fdc = _FileDataCollector(self, cvs_file)
   try:
     cvs2svn_rcsparse.parse(open(cvs_file.rcs_path, 'rb'), fdc)
   except (cvs2svn_rcsparse.common.RCSParseError, RuntimeError):
     self.collect_data.record_fatal_error(
         "%r is not a valid ,v file" % (cvs_file.rcs_path,)
         )
     # Abort the processing of this file, but let the pass continue
     # with other files:
     return
   # 'except ... as e' replaces the Python-2-only 'except ..., e'
   # form, which is a syntax error under Python 3 (valid since 2.6):
   except ValueError as e:
     self.collect_data.record_fatal_error(
         "%r is not a valid ,v file (%s)" % (cvs_file.rcs_path, str(e),)
         )
     # Abort the processing of this file, but let the pass continue
     # with other files:
     return
コード例 #17
0
ファイル: collect_data.py プロジェクト: ralic/cvs2svn-2.4.0
 def process_file(self, cvs_file):
     """Parse the RCS file CVS_FILE with a _FileDataCollector.

     On a parse failure a fatal error is recorded and None is
     returned, so the pass can continue with the remaining files."""
     logger.normal(cvs_file.rcs_path)
     fdc = _FileDataCollector(self, cvs_file)
     try:
         parse(open(cvs_file.rcs_path, 'rb'), fdc)
     except (RCSParseError, RuntimeError):
         self.collect_data.record_fatal_error("%r is not a valid ,v file" %
                                              (cvs_file.rcs_path, ))
         # Abort the processing of this file, but let the pass continue
         # with other files:
         return
     # 'except ... as e' replaces the Python-2-only 'except ..., e'
     # form, which is a syntax error under Python 3 (valid since 2.6):
     except ValueError as e:
         self.collect_data.record_fatal_error(
             "%r is not a valid ,v file (%s)" % (
                 cvs_file.rcs_path,
                 str(e),
             ))
         # Abort the processing of this file, but let the pass continue
         # with other files:
         return
コード例 #18
0
ファイル: collect_data.py プロジェクト: robinst/cvs2svn
 def summarize_symbol_transforms(self):
   """Log a NORMAL-level summary of recorded symbol transforms.

   One line is emitted per (old_name, new_name) pair together with
   the number of files affected; nothing is logged if no transforms
   were recorded or NORMAL logging is disabled."""
   if self.symbol_transform_counts and logger.is_on(logger.NORMAL):
     logger.normal('Summary of symbol transforms:')
     # sorted() works on Python 2 and 3 alike; items() followed by an
     # in-place .sort() fails on Python 3, where items() is a view:
     transforms = sorted(self.symbol_transform_counts.items())
     for ((old_name, new_name), count) in transforms:
       if new_name is None:
         logger.normal('    "%s" ignored in %d files' % (old_name, count,))
       else:
         logger.normal(
             '    "%s" transformed to "%s" in %d files'
             % (old_name, new_name, count,)
             )
コード例 #19
0
 def summarize_symbol_transforms(self):
     """Log a NORMAL-level summary of recorded symbol transforms.

     One line is emitted per (old_name, new_name) pair together with
     the number of files affected; nothing is logged if no transforms
     were recorded or NORMAL logging is disabled."""
     if self.symbol_transform_counts and logger.is_on(logger.NORMAL):
         logger.normal('Summary of symbol transforms:')
         # sorted() works on Python 2 and 3 alike; items() followed by
         # an in-place .sort() fails on Python 3, where items() is a
         # view object:
         transforms = sorted(self.symbol_transform_counts.items())
         for ((old_name, new_name), count) in transforms:
             if new_name is None:
                 logger.normal('    "%s" ignored in %d files' % (
                     old_name,
                     count,
                 ))
             else:
                 logger.normal('    "%s" transformed to "%s" in %d files' %
                               (
                                   old_name,
                                   new_name,
                                   count,
                               ))
コード例 #20
0
ファイル: stdout_delegate.py プロジェクト: termim/cvs2svn
  def start_commit(self, revnum, revprops):
    """Print the Subversion revision number of the commit being started.

    REVPROPS is accepted for interface compatibility and is not used
    here."""

    logger.verbose('=' * 60)
    message = "Starting Subversion r%d / %d" % (revnum, self.total_revs)
    logger.normal(message)
コード例 #21
0
ファイル: pass_manager.py プロジェクト: termim/cvs2svn
  def run(self, run_options):
    """Run the specified passes, one after another.

    RUN_OPTIONS will be passed to the Passes' run() methods.
    RUN_OPTIONS.start_pass is the number of the first pass that should
    be run.  RUN_OPTIONS.end_pass is the number of the last pass that
    should be run.  It must be that 1 <= RUN_OPTIONS.start_pass <=
    RUN_OPTIONS.end_pass <= self.num_passes."""

    # Convert start_pass and end_pass into the indices of the passes
    # to execute, using the Python index range convention (i.e., first
    # pass executed and first pass *after* the ones that should be
    # executed).
    index_start = run_options.start_pass - 1
    index_end = run_options.end_pass

    # Inform the artifact manager when artifacts are created and used:
    for (i, the_pass) in enumerate(self.passes):
      the_pass.register_artifacts()
      # Each pass creates a new version of the statistics file:
      artifact_manager.register_temp_file(
          config.STATISTICS_FILE % (i + 1,), the_pass
          )
      if i != 0:
        # Each pass subsequent to the first reads the statistics file
        # from the preceding pass:
        artifact_manager.register_temp_file_needed(
            config.STATISTICS_FILE % (i + 1 - 1,), the_pass
            )

    # Tell the artifact manager about passes that are being skipped this run:
    for the_pass in self.passes[0:index_start]:
      artifact_manager.pass_skipped(the_pass)

    start_time = time.time()
    for i in range(index_start, index_end):
      the_pass = self.passes[i]
      logger.quiet('----- pass %d (%s) -----' % (i + 1, the_pass.name,))
      artifact_manager.pass_started(the_pass)

      # The first pass starts with fresh statistics; later passes
      # resume from the statistics file archived by the previous pass:
      if i == 0:
        stats_keeper = StatsKeeper()
      else:
        stats_keeper = read_stats_keeper(
            artifact_manager.get_temp_file(
                config.STATISTICS_FILE % (i + 1 - 1,)
                )
            )

      the_pass.run(run_options, stats_keeper)
      end_time = time.time()
      stats_keeper.log_duration_for_pass(
          end_time - start_time, i + 1, the_pass.name
          )
      logger.normal(stats_keeper.single_pass_timing(i + 1))
      # Archive the statistics so the next pass (or a later run using
      # --start-pass) can pick up where this one left off:
      stats_keeper.archive(
          artifact_manager.get_temp_file(config.STATISTICS_FILE % (i + 1,))
          )
      start_time = end_time
      Ctx().clean()
      # Allow the artifact manager to clean up artifacts that are no
      # longer needed:
      artifact_manager.pass_done(the_pass, Ctx().skip_cleanup)

      check_for_garbage()

    # Tell the artifact manager about passes that are being deferred:
    for the_pass in self.passes[index_end:]:
      artifact_manager.pass_deferred(the_pass)

    # NOTE(review): stats_keeper below is the one from the final loop
    # iteration; the documented precondition start_pass <= end_pass
    # guarantees the loop ran at least once.
    logger.quiet(stats_keeper)
    logger.normal(stats_keeper.timings())

    # Consistency check:
    artifact_manager.check_clean()
コード例 #22
0
ファイル: repository_walker.py プロジェクト: akiernan/cvs2svn
  def generate_cvs_paths(self, cvs_directory, exclude_paths):
    """Generate the CVSPaths under non-Attic directory CVS_DIRECTORY.

    Yield CVSDirectory and CVSFile instances as they are found.
    Process directories recursively, including Attic directories.
    Also look for conflicts between the filenames that will result
    from files, attic files, and subdirectories.

    Silently ignore subdirectories named '.svn', as these don't make
    much sense in a real conversion, but they are present in our test
    suite."""

    yield cvs_directory

    # Map {cvs_file.rcs_basename : cvs_file.rcs_path} for files
    # directly in cvs_directory:
    rcsfiles = {}

    attic_dir = None

    # Non-Attic subdirectories of cvs_directory (to be recursed into):
    dirs = []

    fnames = os.listdir(cvs_directory.rcs_path)
    fnames.sort()
    for fname in fnames:
      pathname = os.path.join(cvs_directory.rcs_path, fname)
      path_in_repository = path_join(cvs_directory.get_cvs_path(), fname)
      if path_in_repository in exclude_paths:
        logger.normal(
            "Excluding file from conversion: %s" % (path_in_repository,)
            )
      elif os.path.isdir(pathname):
        if fname == 'Attic':
          attic_dir = fname
        elif fname == '.svn':
          logger.debug("Directory %s ignored" % (pathname,))
        else:
          dirs.append(fname)
      elif fname.endswith(',v'):
        cvs_file = self._get_cvs_file(cvs_directory, fname)
        rcsfiles[cvs_file.rcs_basename] = cvs_file.rcs_path
        yield cvs_file
      else:
        # Silently ignore other files:
        pass

    # Map {cvs_file.rcs_basename : cvs_file.rcs_path} for files in an
    # Attic directory within cvs_directory:
    attic_rcsfiles = {}

    if attic_dir is not None:
      attic_directory = CVSDirectory(
          self.file_key_generator.gen_id(),
          cvs_directory.project, cvs_directory, 'Attic',
          )

      for cvs_path in self._generate_attic_cvs_files(attic_directory, exclude_paths):
        if isinstance(cvs_path, CVSFile) \
               and cvs_path.parent_directory == cvs_directory:
          attic_rcsfiles[cvs_path.rcs_basename] = cvs_path.rcs_path

        yield cvs_path

      alldirs = dirs + [attic_dir]
    else:
      alldirs = dirs

    # Check for conflicts between directory names and the filenames
    # that will result from the rcs files (both in this directory and
    # in attic).  (We recurse into the subdirectories nevertheless, to
    # try to detect more problems.)
    for fname in alldirs:
      for rcsfile_list in [rcsfiles, attic_rcsfiles]:
        if fname in rcsfile_list:
          self.error_handler(
              'Directory name conflicts with filename.  Please remove or '
              'rename one\n'
              'of the following:\n'
              '    "%s"\n'
              '    "%s"' % (
                  os.path.join(cvs_directory.rcs_path, fname),
                  rcsfile_list[fname],
                  )
              )

    # Now recurse into the other subdirectories:
    for fname in dirs:
      dirname = os.path.join(cvs_directory.rcs_path, fname)

      # Verify that the directory name does not contain any illegal
      # characters:
      try:
        Ctx().output_option.verify_filename_legal(fname)
      # 'except ... as e' replaces the Python-2-only 'except ..., e'
      # form, which is a syntax error under Python 3 (valid since 2.6):
      except IllegalSVNPathError as e:
        raise FatalError(
            'Directory %r would result in an illegal SVN path name: %s'
            % (dirname, e,)
            )

      sub_directory = CVSDirectory(
          self.file_key_generator.gen_id(),
          cvs_directory.project, cvs_directory, fname,
          )

      for cvs_path in self.generate_cvs_paths(sub_directory, exclude_paths):
        yield cvs_path
コード例 #23
0
    def run(self, run_options):
        """Run the specified passes, one after another.

    RUN_OPTIONS will be passed to the Passes' run() methods.
    RUN_OPTIONS.start_pass is the number of the first pass that should
    be run.  RUN_OPTIONS.end_pass is the number of the last pass that
    should be run.  It must be that 1 <= RUN_OPTIONS.start_pass <=
    RUN_OPTIONS.end_pass <= self.num_passes."""

        # Convert start_pass and end_pass into the indices of the passes
        # to execute, using the Python index range convention (i.e., first
        # pass executed and first pass *after* the ones that should be
        # executed).
        index_start = run_options.start_pass - 1
        index_end = run_options.end_pass

        # Inform the artifact manager when artifacts are created and used:
        for (i, the_pass) in enumerate(self.passes):
            the_pass.register_artifacts()
            # Each pass creates a new version of the statistics file:
            artifact_manager.register_temp_file(
                config.STATISTICS_FILE % (i + 1, ), the_pass)
            if i != 0:
                # Each pass subsequent to the first reads the statistics file
                # from the preceding pass:
                artifact_manager.register_temp_file_needed(
                    config.STATISTICS_FILE % (i + 1 - 1, ), the_pass)

        # Tell the artifact manager about passes that are being skipped this run:
        for the_pass in self.passes[0:index_start]:
            artifact_manager.pass_skipped(the_pass)

        start_time = time.time()
        for i in range(index_start, index_end):
            the_pass = self.passes[i]
            logger.quiet('----- pass %d (%s) -----' % (
                i + 1,
                the_pass.name,
            ))
            artifact_manager.pass_started(the_pass)

            # The first pass starts with fresh statistics; later passes
            # resume from the file archived by the previous pass:
            if i == 0:
                stats_keeper = StatsKeeper()
            else:
                stats_keeper = read_stats_keeper(
                    artifact_manager.get_temp_file(config.STATISTICS_FILE %
                                                   (i + 1 - 1, )))

            the_pass.run(run_options, stats_keeper)
            end_time = time.time()
            stats_keeper.log_duration_for_pass(end_time - start_time, i + 1,
                                               the_pass.name)
            logger.normal(stats_keeper.single_pass_timing(i + 1))
            # Archive the statistics so the next pass (or a later run
            # using --start-pass) can resume from them:
            stats_keeper.archive(
                artifact_manager.get_temp_file(config.STATISTICS_FILE %
                                               (i + 1, )))
            start_time = end_time
            Ctx().clean()
            # Allow the artifact manager to clean up artifacts that are no
            # longer needed:
            artifact_manager.pass_done(the_pass, Ctx().skip_cleanup)

            self.garbage_collection_policy.check_for_garbage()

        # Tell the artifact manager about passes that are being deferred:
        for the_pass in self.passes[index_end:]:
            artifact_manager.pass_deferred(the_pass)

        # NOTE(review): stats_keeper below comes from the final loop
        # iteration; the documented precondition start_pass <= end_pass
        # guarantees the loop ran at least once.
        logger.quiet(stats_keeper)
        logger.normal(stats_keeper.timings())

        # Consistency check:
        artifact_manager.check_clean()
コード例 #24
0
ファイル: repository_walker.py プロジェクト: mhagger/cvs2svn
  def generate_cvs_paths(self, cvs_directory, exclude_paths):
    """Generate the CVSPaths under non-Attic directory CVS_DIRECTORY.

    Yield CVSDirectory and CVSFile instances as they are found.
    Process directories recursively, including Attic directories.
    Also look for conflicts between the filenames that will result
    from files, attic files, and subdirectories.

    Silently ignore subdirectories named 'CVS', as these are used by
    CVS to store metadata that are irrelevant to the conversion.
    Silently ignore subdirectories named '.svn', as these don't make
    much sense in a real conversion, but they are present in our test
    suite."""

    yield cvs_directory

    # Map {cvs_file.rcs_basename : cvs_file.rcs_path} for files
    # directly in cvs_directory:
    rcsfiles = {}

    attic_dir = None

    # Non-Attic subdirectories of cvs_directory (to be recursed into):
    dirs = []

    fnames = os.listdir(cvs_directory.rcs_path)
    fnames.sort()
    for fname in fnames:
      pathname = os.path.join(cvs_directory.rcs_path, fname)
      path_in_repository = path_join(cvs_directory.get_cvs_path(), fname)
      if path_in_repository in exclude_paths:
        logger.normal(
            "Excluding file from conversion: %s" % (path_in_repository,)
            )
      elif os.path.isdir(pathname):
        if fname == 'Attic':
          attic_dir = fname
        elif fname == '.svn' or fname == 'CVS':
          logger.debug("Directory %s ignored" % (pathname,))
        else:
          dirs.append(fname)
      elif fname.endswith(',v'):
        cvs_file = self._get_cvs_file(cvs_directory, fname)
        rcsfiles[cvs_file.rcs_basename] = cvs_file.rcs_path
        yield cvs_file
      else:
        # Silently ignore other files:
        pass

    # Map {cvs_file.rcs_basename : cvs_file.rcs_path} for files in an
    # Attic directory within cvs_directory:
    attic_rcsfiles = {}

    if attic_dir is not None:
      attic_directory = CVSDirectory(
          self.file_key_generator.gen_id(),
          cvs_directory.project, cvs_directory, 'Attic',
          )

      for cvs_path in self._generate_attic_cvs_files(attic_directory, exclude_paths):
        if isinstance(cvs_path, CVSFile) \
               and cvs_path.parent_directory == cvs_directory:
          attic_rcsfiles[cvs_path.rcs_basename] = cvs_path.rcs_path

        yield cvs_path

      alldirs = dirs + [attic_dir]
    else:
      alldirs = dirs

    # Check for conflicts between directory names and the filenames
    # that will result from the rcs files (both in this directory and
    # in attic).  (We recurse into the subdirectories nevertheless, to
    # try to detect more problems.)
    for fname in alldirs:
      for rcsfile_list in [rcsfiles, attic_rcsfiles]:
        if fname in rcsfile_list:
          self.error_handler(
              'Directory name conflicts with filename.  Please remove or '
              'rename one\n'
              'of the following:\n'
              '    "%s"\n'
              '    "%s"' % (
                  os.path.join(cvs_directory.rcs_path, fname),
                  rcsfile_list[fname],
                  )
              )

    # Now recurse into the other subdirectories:
    for fname in dirs:
      dirname = os.path.join(cvs_directory.rcs_path, fname)

      # Verify that the directory name does not contain any illegal
      # characters:
      try:
        Ctx().output_option.verify_filename_legal(fname)
      # 'except ... as e' replaces the Python-2-only 'except ..., e'
      # form, which is a syntax error under Python 3 (valid since 2.6):
      except IllegalSVNPathError as e:
        raise FatalError(
            'Directory %r would result in an illegal SVN path name: %s'
            % (dirname, e,)
            )

      sub_directory = CVSDirectory(
          self.file_key_generator.gen_id(),
          cvs_directory.project, cvs_directory, fname,
          )

      for cvs_path in self.generate_cvs_paths(sub_directory, exclude_paths):
        yield cvs_path