Rev 4667: An alternative implementation that passes both tests. in http://bazaar.launchpad.net/~jameinel/bzr/2.1b1-pack-on-the-fly

John Arbash Meinel john at arbash-meinel.com
Tue Sep 1 22:21:59 BST 2009


At http://bazaar.launchpad.net/~jameinel/bzr/2.1b1-pack-on-the-fly

------------------------------------------------------------
revno: 4667
revision-id: john at arbash-meinel.com-20090901212153-lpuduugn7xvpvanl
parent: john at arbash-meinel.com-20090901211316-nefngqah1yutdikv
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: 2.1b1-pack-on-the-fly
timestamp: Tue 2009-09-01 16:21:53 -0500
message:
  An alternative implementation that passes both tests.
  Basically, instead of always rebuilding all blocks, just mark blocks
  that have a single record as needing to be repacked.
  This isn't a great solution, but it is one step in between always
  rebuilding and never rebuilding.
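
To make the heuristic concrete, here is a minimal standalone sketch (a
hypothetical helper, not bzrlib code) of the decision this revision adds;
the storage_kind and _manager._factories attributes it consults are the
same ones used in the patch below.

    def should_reuse_block(record, reuse_blocks=True):
        # Only groupcompress blocks can be copied into the target verbatim.
        if not reuse_blocks or record.storage_kind != 'groupcompress-block':
            return False
        # A block holding just one record is cheap to recompress and gains
        # little from raw reuse, so mark it to be repacked on the fly.
        return len(record._manager._factories) > 1

In the patched _insert_record_stream the same check is written inline via a
reuse_this_block flag rather than a separate helper.
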
-------------- next part --------------
=== modified file 'bzrlib/groupcompress.py'
--- a/bzrlib/groupcompress.py	2009-09-01 21:13:16 +0000
+++ b/bzrlib/groupcompress.py	2009-09-01 21:21:53 +0000
@@ -1516,8 +1516,7 @@
         # test_insert_record_stream_existing_keys fail for groupcompress and
         # groupcompress-nograph, this needs to be revisited while addressing
         # 'bzr branch' performance issues.
-        for _ in self._insert_record_stream(stream, random_id=False,
-            reuse_blocks=False):
+        for _ in self._insert_record_stream(stream, random_id=False):
             pass
 
     def _insert_record_stream(self, stream, random_id=False, nostore_sha=None,
@@ -1581,13 +1580,22 @@
                                ' but then inserted %r two times', record.key)
                     continue
                 inserted_keys.add(record.key)
-            if not inserted_keys and reuse_blocks:
+            reuse_this_block = reuse_blocks
+            if reuse_this_block:
                 # If the reuse_blocks flag is set, check to see if we can just
                 # copy a groupcompress block as-is.
                 if record.storage_kind == 'groupcompress-block':
+                    # Check to see if we really want to re-use this block
+                    insert_manager = record._manager
+                    if len(insert_manager._factories) == 1:
+                        # This block only has a single record in it
+                        # Mark this block to be rebuilt
+                        reuse_this_block = False
+            if reuse_this_block:
+                # We still want to reuse this block
+                if record.storage_kind == 'groupcompress-block':
                     # Insert the raw block into the target repo
                     insert_manager = record._manager
-                    insert_manager._check_rebuild_block()
                     bytes = record._manager._block.to_bytes()
                     _, start, length = self._access.add_raw_records(
                         [(None, len(bytes))], bytes)[0]

=== modified file 'bzrlib/tests/test_repository.py'
--- a/bzrlib/tests/test_repository.py	2009-09-01 06:10:24 +0000
+++ b/bzrlib/tests/test_repository.py	2009-09-01 21:21:53 +0000
@@ -696,6 +696,7 @@
         target = self.make_repository('target', format='2a')
         target.fetch(source.repository)
         target.lock_read()
+        self.addCleanup(target.unlock)
         details = target.texts._index.get_build_details(
             [('file-id', '1',), ('file-id', '2',)])
         file_1_details = details[('file-id', '1')]
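
The test_repository.py hunk only registers a cleanup for the read lock taken
on the previous line. A minimal sketch of that pattern (the class and test
names here are hypothetical; make_repository, lock_read, is_locked and
addCleanup are existing bzrlib/unittest APIs):

    from bzrlib.tests import TestCaseWithTransport

    class TestReadLockCleanup(TestCaseWithTransport):

        def test_read_lock_is_released(self):
            repo = self.make_repository('repo', format='2a')
            repo.lock_read()
            # Register the unlock immediately so the lock is released even
            # if a later assertion raises, instead of leaking into other
            # tests.
            self.addCleanup(repo.unlock)
            self.assertTrue(repo.is_locked())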


