Example #1
    def test_run_bzr_subprocess_env_del(self):
        """run_bzr_subprocess can remove environment variables too."""
        # Create a random email, so we are sure this won't collide
        rand_bzr_email = 'John Doe <jdoe@%s.com>' % (osutils.rand_chars(20),)
        rand_email = 'Jane Doe <jdoe@%s.com>' % (osutils.rand_chars(20),)
        os.environ['BZR_EMAIL'] = rand_bzr_email
        os.environ['EMAIL'] = rand_email
        try:
            # By default, the child will inherit the current env setting
            out, err = self.run_bzr_subprocess('whoami',
                                               universal_newlines=True)
            self.assertEqual('', err)
            self.assertEqual(rand_bzr_email + '\n', out)

            # Now that BZR_EMAIL is not set, it should fall back to EMAIL
            out, err = self.run_bzr_subprocess('whoami',
                                               env_changes={'BZR_EMAIL': None},
                                               universal_newlines=True)
            self.assertEqual('', err)
            self.assertEqual(rand_email + '\n', out)

            # This switches back to the default email guessing logic
            # Which shouldn't match either of the above addresses
            out, err = self.run_bzr_subprocess('whoami',
                                               env_changes={'BZR_EMAIL': None,
                                                            'EMAIL': None},
                                               universal_newlines=True)

            self.assertEqual('', err)
            self.assertNotEqual(rand_bzr_email + '\n', out)
            self.assertNotEqual(rand_email + '\n', out)
        finally:
            # TestCase cleans up BZR_EMAIL and EMAIL at startup
            del os.environ['BZR_EMAIL']
            del os.environ['EMAIL']
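
Every example on this page leans on osutils.rand_chars. As a reference point, here is a minimal stand-in, assuming only that the helper must return a fixed-length string of lowercase alphanumeric characters (a hypothetical sketch, not bzrlib's actual implementation):

    import random
    import string

    def rand_chars(num):
        """Return num random lowercase alphanumeric characters."""
        alnum = string.ascii_lowercase + string.digits
        return ''.join(random.choice(alnum) for _ in range(num))
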
Example #2
    def test__c_intern_handles_refcount(self):
        if self.module is _static_tuple_py:
            return # Not applicable
        unique_str1 = 'unique str ' + osutils.rand_chars(20)
        unique_str2 = 'unique str ' + osutils.rand_chars(20)
        key = self.module.StaticTuple(unique_str1, unique_str2)
        self.assertRefcount(1, key)
        self.assertFalse(key in self.module._interned_tuples)
        self.assertFalse(key._is_interned())
        key2 = self.module.StaticTuple(unique_str1, unique_str2)
        self.assertRefcount(1, key)
        self.assertRefcount(1, key2)
        self.assertEqual(key, key2)
        self.assertIsNot(key, key2)

        key3 = key.intern()
        self.assertIs(key, key3)
        self.assertTrue(key in self.module._interned_tuples)
        self.assertEqual(key, self.module._interned_tuples[key])
        # key and key3, but we 'hide' the one in _interned_tuples
        self.assertRefcount(2, key)
        del key3
        self.assertRefcount(1, key)
        self.assertTrue(key._is_interned())
        self.assertRefcount(1, key2)
        key3 = key2.intern()
        # key3 now points to key as well, and *not* to key2
        self.assertRefcount(2, key)
        self.assertRefcount(1, key2)
        self.assertIs(key, key3)
        self.assertIsNot(key3, key2)
        del key2
        del key3
        self.assertRefcount(1, key)
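
The interning contract exercised here can be modelled with a plain dict: intern() returns the first-seen instance that compares equal, registering it on first use. A rough pure-Python sketch (the real C module additionally 'hides' the interned copy from the visible refcount, which is what assertRefcount is probing):

    _interned_tuples = {}

    def intern_tuple(key):
        """Return the canonical instance equal to key, registering it if new."""
        return _interned_tuples.setdefault(key, key)
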
Example #5
 def test_intern(self):
     unique_str1 = 'unique str ' + osutils.rand_chars(20)
     unique_str2 = 'unique str ' + osutils.rand_chars(20)
     key = self.module.StaticTuple(unique_str1, unique_str2)
     self.assertFalse(key in self.module._interned_tuples)
     key2 = self.module.StaticTuple(unique_str1, unique_str2)
     self.assertEqual(key, key2)
     self.assertIsNot(key, key2)
     key3 = key.intern()
     self.assertIs(key, key3)
     self.assertTrue(key in self.module._interned_tuples)
     self.assertEqual(key, self.module._interned_tuples[key])
     key2 = key2.intern()
     self.assertIs(key, key2)
Example #6
    def force_break_corrupt(self, corrupt_info_lines):
        """Release a lock that has been corrupted.

        This is very similar to force_break, except it doesn't assume that
        self.peek() can work.

        :param corrupt_info_lines: the lines of the corrupted info file, used
            to check that the lock hasn't changed between reading the (corrupt)
            info file and calling force_break_corrupt.
        """
        # XXX: this copes with unparseable info files, but what about missing
        # info files?  Or missing lock dirs?
        self._check_not_locked()
        tmpname = '%s/broken.%s.tmp' % (self.path, rand_chars(20))
        self.transport.rename(self._held_dir, tmpname)
        # check that we actually broke the right lock, not someone else;
        # there's a small race window between checking it and doing the
        # rename.
        broken_info_path = tmpname + self.__INFO_NAME
        broken_content = self.transport.get_bytes(broken_info_path)
        broken_lines = osutils.split_lines(broken_content)
        if broken_lines != corrupt_info_lines:
            raise LockBreakMismatch(self, broken_lines, corrupt_info_lines)
        self.transport.delete(broken_info_path)
        self.transport.rmdir(tmpname)
        result = lock.LockResult(self.transport.abspath(self.path))
        for hook in self.hooks['lock_broken']:
            hook(result)
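
The same rename-verify-delete dance, sketched against a plain local filesystem (hypothetical helper names; the real code goes through a bzrlib Transport and raises LockBreakMismatch on a mismatch):

    import os

    def break_lock_dir(held_dir, expected_info_bytes):
        """Move a held lock dir aside atomically, verify it, then remove it."""
        tmpname = held_dir + '.broken.tmp'
        os.rename(held_dir, tmpname)  # atomic on POSIX filesystems
        info_path = os.path.join(tmpname, 'info')
        with open(info_path, 'rb') as f:
            if f.read() != expected_info_bytes:
                raise AssertionError('lock changed hands during the break')
        os.remove(info_path)
        os.rmdir(tmpname)
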
Example #8
    def create_plugin_file(self, contents):
        """Create a file to be used as a plugin.

        This is created in a temporary directory, so that we
        are sure that it doesn't start in the plugin path.
        """
        os.mkdir('tmp')
        plugin_name = 'bzr_plugin_a_%s' % (osutils.rand_chars(4),)
        with open('tmp/' + plugin_name + '.py', 'wb') as f:
            f.write(contents)
        return plugin_name
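
A hypothetical standalone rendering of the same helper, with a usage line showing how the returned name can be imported (the fixed suffix stands in for osutils.rand_chars(4)):

    import os
    import sys

    def create_plugin_file(contents, suffix='abcd'):
        """Write a throwaway plugin module under tmp/ and return its name."""
        if not os.path.isdir('tmp'):
            os.mkdir('tmp')
        plugin_name = 'bzr_plugin_a_%s' % (suffix,)
        with open(os.path.join('tmp', plugin_name + '.py'), 'wb') as f:
            f.write(contents)
        return plugin_name

    name = create_plugin_file(b"print('plugin loaded')\n")
    sys.path.insert(0, 'tmp')
    __import__(name)  # prints: plugin loaded
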
Example #9
 def test__c_keys_are_not_immortal(self):
     if self.module is _static_tuple_py:
         return # Not applicable
     unique_str1 = 'unique str ' + osutils.rand_chars(20)
     unique_str2 = 'unique str ' + osutils.rand_chars(20)
     key = self.module.StaticTuple(unique_str1, unique_str2)
     self.assertFalse(key in self.module._interned_tuples)
     self.assertRefcount(1, key)
     key = key.intern()
     self.assertRefcount(1, key)
     self.assertTrue(key in self.module._interned_tuples)
     self.assertTrue(key._is_interned())
     del key
     # Create a new entry, which would point to the same location
     key = self.module.StaticTuple(unique_str1, unique_str2)
     self.assertRefcount(1, key)
     # This old entry in _interned_tuples should be gone
     self.assertFalse(key in self.module._interned_tuples)
     self.assertFalse(key._is_interned())
Example #12
 def _create_pending_dir(self):
     tmpname = '%s/%s.tmp' % (self.path, rand_chars(10))
     try:
         self.transport.mkdir(tmpname)
     except NoSuchFile:
         # This may raise a FileExists exception
         # which is okay, it will be caught later and determined
         # to be a LockContention.
         self._trace("lock directory does not exist, creating it")
         self.create(mode=self._dir_modebits)
         # After creating the lock directory, try again
         self.transport.mkdir(tmpname)
     self.nonce = rand_chars(20)
     info_bytes = self._prepare_info()
     # We use put_bytes_non_atomic because we just created a new unique
     # directory so we don't have to worry about files existing there.
     # We'll rename the whole directory into place to get atomic
     # properties
     self.transport.put_bytes_non_atomic(tmpname + self.__INFO_NAME,
                                         info_bytes)
     return tmpname
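
_create_pending_dir is only the staging half; the caller then renames the pending directory into place. The complete take-lock step, sketched for a local filesystem with hypothetical names:

    import os

    def attempt_lock(lock_path, info_bytes, nonce):
        """Stage a pending dir, then atomically rename it to 'held'."""
        tmpname = os.path.join(lock_path, '%s.tmp' % nonce)
        os.mkdir(tmpname)
        with open(os.path.join(tmpname, 'info'), 'wb') as f:
            f.write(info_bytes)
        # Renaming a directory is atomic on POSIX; if another process already
        # holds the lock, the rename fails, which bzrlib reports as
        # LockContention.
        os.rename(tmpname, os.path.join(lock_path, 'held'))
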
Example #14
 @classmethod
 def for_this_process(cls, extra_holder_info):
     """Return a new LockHeldInfo for a lock taken by this process."""
     info = dict(
         hostname=get_host_name(),
         pid=str(os.getpid()),
         nonce=rand_chars(20),
         start_time=str(int(time.time())),
         user=get_username_for_lock_info(),
         )
     if extra_holder_info is not None:
         info.update(extra_holder_info)
     return cls(info)
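
The helpers above (get_host_name, get_username_for_lock_info) are bzrlib's own; an equivalent info dict can be sketched with the standard library alone (stand-ins noted inline):

    import getpass
    import os
    import socket
    import time
    import uuid

    def holder_info():
        """Hypothetical stdlib-only equivalent of the dict built above."""
        return dict(
            hostname=socket.gethostname(),   # stands in for get_host_name()
            pid=str(os.getpid()),
            nonce=uuid.uuid4().hex[:20],     # stands in for rand_chars(20)
            start_time=str(int(time.time())),
            user=getpass.getuser(),          # for get_username_for_lock_info()
            )
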
Example #17
    def force_break(self, dead_holder_info):
        """Release a lock held by another process.

        WARNING: This should only be used when the other process is dead; if
        it still thinks it has the lock there will be two concurrent writers.
        In general the user's approval should be sought for lock breaks.

        After the lock is broken it will not be held by any process.
        It is possible that another process may sneak in and take the
        lock before the breaking process acquires it.

        :param dead_holder_info:
            Must be the result of a previous LockDir.peek() call; this is used
            to check that it's still held by the same process that the user
            decided was dead.  If this is not the current holder,
            LockBreakMismatch is raised.

        :returns: LockResult for the broken lock.
        """
        if not isinstance(dead_holder_info, LockHeldInfo):
            raise ValueError("dead_holder_info: %r" % dead_holder_info)
        self._check_not_locked()
        current_info = self.peek()
        if current_info is None:
            # must have been recently released
            return
        if current_info != dead_holder_info:
            raise LockBreakMismatch(self, current_info, dead_holder_info)
        tmpname = '%s/broken.%s.tmp' % (self.path, rand_chars(20))
        self.transport.rename(self._held_dir, tmpname)
        # check that we actually broke the right lock, not someone else;
        # there's a small race window between checking it and doing the
        # rename.
        broken_info_path = tmpname + self.__INFO_NAME
        broken_info = self._read_info_file(broken_info_path)
        if broken_info != dead_holder_info:
            raise LockBreakMismatch(self, broken_info, dead_holder_info)
        self.transport.delete(broken_info_path)
        self.transport.rmdir(tmpname)
        result = lock.LockResult(self.transport.abspath(self.path),
                                 current_info.get('nonce'))
        for hook in self.hooks['lock_broken']:
            hook(result)
        return result
Example #19
 def unlock(self):
     """Release a held lock
     """
     if self._fake_read_lock:
         self._fake_read_lock = False
         return
     if not self._lock_held:
         return lock.cant_unlock_not_held(self)
     if self._locked_via_token:
         self._locked_via_token = False
         self._lock_held = False
     else:
         old_nonce = self.nonce
         # rename before deleting, because we can't atomically remove the
         # whole tree
         start_time = time.time()
         self._trace("unlocking")
         tmpname = '%s/releasing.%s.tmp' % (self.path, rand_chars(20))
         # gotta own it to unlock
         self.confirm()
         self.transport.rename(self._held_dir, tmpname)
         self._lock_held = False
         self.transport.delete(tmpname + self.__INFO_NAME)
         try:
             self.transport.rmdir(tmpname)
         except DirectoryNotEmpty, e:
             # There might have been junk left over by a rename that moved
             # another locker within the 'held' directory.  do a slower
             # deletion where we list the directory and remove everything
             # within it.
             #
             # Maybe this should be broader to allow for ftp servers with
             # non-specific error messages?
             self._trace(
                 "doing recursive deletion of non-empty directory "
                 "%s", tmpname)
             self.transport.delete_tree(tmpname)
         self._trace("... unlock succeeded after %dms",
                     (time.time() - start_time) * 1000)
         result = lock.LockResult(self.transport.abspath(self.path),
                                  old_nonce)
         for hook in self.hooks['lock_released']:
             hook(result)
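
The release path is acquisition in reverse: rename 'held' aside under a unique name, then delete it. A local-filesystem sketch with hypothetical names, including the fallback that the DirectoryNotEmpty branch above implements:

    import os
    import shutil

    def release_lock(lock_path, nonce):
        """Move 'held' aside, delete its info file, then remove the dir."""
        tmpname = os.path.join(lock_path, 'releasing.%s.tmp' % nonce)
        os.rename(os.path.join(lock_path, 'held'), tmpname)
        os.remove(os.path.join(tmpname, 'info'))
        try:
            os.rmdir(tmpname)
        except OSError:
            # junk left behind by another locker's rename: fall back to a
            # recursive delete of the whole tree
            shutil.rmtree(tmpname)
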
Example #21
        def wait_and_switch():
            lf1.attempt_lock()
            # Block until lock2 has had a chance to check
            note('lock1: waiting 1 for checked lock')
            wait_until_checked_lock.acquire()
            note('lock1: acquired for checked lock')
            note('lock1: released lockdir')
            lf1.unlock()
            note('lock1: acquiring lockdir')
            # Create a new nonce, so the lock looks different.
            lf1.nonce = osutils.rand_chars(20)
            lf1.lock_write()
            note('lock1: acquired lockdir')

            # Block until lock2 has peeked again
            note('lock1: waiting 2 for checked lock')
            wait_until_checked_lock.acquire()
            note('lock1: acquired for checked lock')
            # Now unlock, and let lock 2 grab the lock
            lf1.unlock()
            wait_to_check_lock.release()
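
The test coordinates two threads with pre-acquired threading.Lock objects used as one-shot gates (wait_until_checked_lock, wait_to_check_lock). The handshake reduces to this minimal, runnable sketch:

    import threading

    gate = threading.Lock()
    gate.acquire()  # start closed

    def worker():
        gate.acquire()  # blocks until the main thread opens the gate
        print('worker: resumed')

    t = threading.Thread(target=worker)
    t.start()
    # ... the main thread does its checking, then lets the worker continue:
    gate.release()
    t.join()
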
Example #23
    def create_modules(self):
        """Create some random modules to be imported.

        Each entry has a random suffix, and the full names are saved.

        These are set up as follows:
         base/ <= used to ensure not in default search path
            root-XXX/
                __init__.py <= This will contain var1, func1
                mod-XXX.py <= This will contain var2, func2
                sub-XXX/
                    __init__.py <= Contains var3, func3
                    submoda-XXX.py <= contains var4, func4
                    submodb-XXX.py <= contains var5, func5
        """
        rand_suffix = osutils.rand_chars(4)
        root_name = 'root_' + rand_suffix
        mod_name = 'mod_' + rand_suffix
        sub_name = 'sub_' + rand_suffix
        submoda_name = 'submoda_' + rand_suffix
        submodb_name = 'submodb_' + rand_suffix

        os.mkdir('base')
        root_path = osutils.pathjoin('base', root_name)
        os.mkdir(root_path)
        root_init = osutils.pathjoin(root_path, '__init__.py')
        f = open(root_init, 'wb')
        try:
            f.write('var1 = 1\ndef func1(a):\n  return a\n')
        finally:
            f.close()
        mod_path = osutils.pathjoin(root_path, mod_name + '.py')
        f = open(mod_path, 'wb')
        try:
            f.write('var2 = 2\ndef func2(a):\n  return a\n')
        finally:
            f.close()

        sub_path = osutils.pathjoin(root_path, sub_name)
        os.mkdir(sub_path)
        f = open(osutils.pathjoin(sub_path, '__init__.py'), 'wb')
        try:
            f.write('var3 = 3\ndef func3(a):\n  return a\n')
        finally:
            f.close()
        submoda_path = osutils.pathjoin(sub_path, submoda_name + '.py')
        f = open(submoda_path, 'wb')
        try:
            f.write('var4 = 4\ndef func4(a):\n  return a\n')
        finally:
            f.close()
        submodb_path = osutils.pathjoin(sub_path, submodb_name + '.py')
        f = open(submodb_path, 'wb')
        try:
            f.write('var5 = 5\ndef func5(a):\n  return a\n')
        finally:
            f.close()
        self.root_name = root_name
        self.mod_name = mod_name
        self.sub_name = sub_name
        self.submoda_name = submoda_name
        self.submodb_name = submodb_name
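
The saved names feed whatever import machinery is under test; assuming create_modules() has just run, the generated tree could be consumed like this (the suffix is random, so the module name below is hypothetical):

    import sys

    sys.path.insert(0, 'base')
    root = __import__('root_abcd')  # 'abcd' stands in for the random suffix
    print(root.var1)      # -> 1
    print(root.func1(2))  # -> 2
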
Example #25
    def test_fetch_with_fallback_and_merge(self):
        builder = self.make_branch_builder('source', format='pack-0.92')
        builder.start_series()
        # graph
        #   A
        #   |\
        #   B C
        #   | |
        #   | D
        #   | |
        #   | E
        #    \|
        #     F
        # A & B are present in the base (stacked-on) repository, A-E are
        # present in the source.
        # This reproduces bug #304841
        # We need a large enough inventory that total size of compressed deltas
        # is shorter than the size of a compressed fulltext. We have to use
        # random ids because otherwise the inventory fulltext compresses too
        # well and the deltas get bigger.
        to_add = [('add', ('', 'TREE_ROOT', 'directory', None))]
        for i in xrange(10):
            fname = 'file%03d' % (i, )
            fileid = '%s-%s' % (fname, osutils.rand_chars(64))
            to_add.append(('add', (fname, fileid, 'file', 'content\n')))
        builder.build_snapshot('A', None, to_add)
        builder.build_snapshot('B', ['A'], [])
        builder.build_snapshot('C', ['A'], [])
        builder.build_snapshot('D', ['C'], [])
        builder.build_snapshot('E', ['D'], [])
        builder.build_snapshot('F', ['E', 'B'], [])
        builder.finish_series()
        source_branch = builder.get_branch()
        source_branch.bzrdir.sprout('base', revision_id='B')
        target_branch = self.make_branch('target', format='1.6')
        target_branch.set_stacked_on_url('../base')
        source = source_branch.repository
        source.lock_read()
        self.addCleanup(source.unlock)
        source.inventories = versionedfile.OrderingVersionedFilesDecorator(
            source.inventories,
            key_priority={('E',): 1, ('D',): 2, ('C',): 4, ('F',): 3})
        # Ensure that the content is yielded in the proper order, and given as
        # the expected kinds
        records = [(record.key, record.storage_kind)
                   for record in source.inventories.get_record_stream(
                       [('D',), ('C',), ('E',), ('F',)], 'unordered', False)]
        self.assertEqual([(('E',), 'knit-delta-gz'),
                          (('D',), 'knit-delta-gz'),
                          (('F',), 'knit-delta-gz'),
                          (('C',), 'knit-delta-gz')], records)

        target_branch.lock_write()
        self.addCleanup(target_branch.unlock)
        target = target_branch.repository
        target.fetch(source, revision_id='F')
        # 'C' should be expanded to a fulltext, but D and E should still be
        # deltas
        stream = target.inventories.get_record_stream(
            [('C',), ('D',), ('E',), ('F',)], 'unordered', False)
        kinds = dict((record.key, record.storage_kind) for record in stream)
        self.assertEqual({('C',): 'knit-ft-gz',
                          ('D',): 'knit-delta-gz',
                          ('E',): 'knit-delta-gz',
                          ('F',): 'knit-delta-gz'}, kinds)
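
The comment about random ids is easy to verify directly with zlib: repetitive text compresses far below its input size, while random bytes barely shrink (illustrative only, unrelated to bzrlib's APIs):

    import os
    import zlib

    repetitive = b'file000 content\n' * 64
    random_ish = os.urandom(len(repetitive))
    print(len(zlib.compress(repetitive)))  # small: repetition compresses well
    print(len(zlib.compress(random_ish)))  # near input size: no redundancy
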
Example #27
 def test_decode_1k_by_1k_ascii_cached(self):
     """Test decoding 5 revisions 100k times"""
     revisions = [osutils.rand_chars(60) for x in xrange(1000)]
     self.time(self.decode_cached_multi, revisions, 1000)
Example #28
 def test_encode_1k_by_1k_ascii(self):
     """Test encoding 5 revisions 100k times"""
     revisions = [unicode(osutils.rand_chars(60)) for x in xrange(1000)]
     self.time(self.encode_multi, revisions, 1000)
Example #29
 def test_decode_1k_by_1k_unicode_cached(self):
     """Test decoding 5 revisions 100k times"""
     revisions = [(u'\u062c\u0648\u062c\u0648' +
                   unicode(osutils.rand_chars(60))).encode('utf8')
                  for x in xrange(1000)]
     self.time(self.decode_cached_multi, revisions, 1000)
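
The timed helpers (decode_cached_multi, encode_multi) aren't shown in these snippets; a plausible sketch of the cache-backed decoder they presumably exercise (hypothetical names and behaviour):

    _decode_cache = {}

    def decode_cached(raw):
        """Decode UTF-8 bytes once, memoizing so repeats are dict hits."""
        try:
            return _decode_cache[raw]
        except KeyError:
            return _decode_cache.setdefault(raw, raw.decode('utf8'))

    def decode_cached_multi(revisions, repeats):
        for _ in range(repeats):
            for rev in revisions:
                decode_cached(rev)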