Rev 3916: Cleanup asserts in file:///home/vila/src/bzr/experimental/brisbane-core/

Vincent Ladeuil v.ladeuil+lp at free.fr
Tue Mar 31 17:39:35 BST 2009


At file:///home/vila/src/bzr/experimental/brisbane-core/

------------------------------------------------------------
revno: 3916 [merge]
revision-id: v.ladeuil+lp at free.fr-20090331163934-yd5q8w4xee8qva1i
parent: john at arbash-meinel.com-20090331161433-h2ryvmhhuhp31k92
parent: v.ladeuil+lp at free.fr-20090331162047-0ysybpncrnschkdc
committer: Vincent Ladeuil <v.ladeuil+lp at free.fr>
branch nick: brisbane-core
timestamp: Tue 2009-03-31 18:39:34 +0200
message:
  Cleanup asserts
modified:
  bzrlib/_chk_map_py.py          _chk_map_py.py-20090309114220-1kurz7oez2gwqtcf-1
  bzrlib/chk_map.py              chk_map.py-20081001014447-ue6kkuhofvdecvxa-1
  bzrlib/groupcompress.py        groupcompress.py-20080705181503-ccbxd6xuy1bdnrpu-8
  bzrlib/inventory.py            inventory.py-20050309040759-6648b84ca2005b37
  bzrlib/repofmt/groupcompress_repo.py repofmt.py-20080715094215-wp1qfvoo7093c8qr-1
  bzrlib/repofmt/pack_repo.py    pack_repo.py-20070813041115-gjv5ma7ktfqwsjgn-1
  bzrlib/tests/test_groupcompress.py test_groupcompress.p-20080705181503-ccbxd6xuy1bdnrpu-13
-------------- next part --------------
=== modified file 'bzrlib/_chk_map_py.py'
--- a/bzrlib/_chk_map_py.py	2009-03-25 07:54:11 +0000
+++ b/bzrlib/_chk_map_py.py	2009-03-31 16:04:31 +0000
@@ -142,6 +142,8 @@
         line = common_prefix + line
         prefix, flat_key = line.rsplit('\x00', 1)
         items[prefix] = (flat_key,)
+    if len(items) == 0:
+        raise AssertionError("We didn't find any item for %s" % key)
     result._items = items
     result._len = length
     result._maximum_size = maximum_size
@@ -151,7 +153,6 @@
     #      change if we add prefix compression
     result._raw_size = None # len(bytes)
     result._node_width = len(prefix)
-    assert len(items) > 0
     result._search_prefix = common_prefix
     return result
 

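The pattern in this hunk recurs throughout the revision: a bare assert is stripped when Python runs with -O (__debug__ is False), so the invariant silently stops being checked, while an explicit raise always runs and can carry a diagnostic message. A minimal sketch of the difference (hypothetical helper names, not from bzrlib):

    def check_items_with_assert(items, key):
        # Stripped entirely under 'python -O', and carries no message.
        assert len(items) > 0

    def check_items_explicitly(items, key):
        # Always executed, and reports which key failed.
        if len(items) == 0:
            raise AssertionError("We didn't find any item for %s" % key)
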
=== modified file 'bzrlib/chk_map.py'
--- a/bzrlib/chk_map.py	2009-03-30 21:13:24 +0000
+++ b/bzrlib/chk_map.py	2009-03-31 16:04:31 +0000
@@ -413,7 +413,9 @@
                             self_pending)
                         basis_prefix, _, basis_node, basis_path = heapq.heappop(
                             basis_pending)
-                        assert self_prefix == basis_prefix
+                        if self_prefix != basis_prefix:
+                            raise AssertionError(
+                                '%r != %r' % (self_prefix, basis_prefix))
                         process_common_prefix_nodes(
                             self_node, self_path,
                             basis_node, basis_path)
@@ -736,7 +738,8 @@
 
         :return: (common_serialised_prefix, [(node_serialised_prefix, node)])
         """
-        assert self._search_prefix is not _unknown
+        if self._search_prefix is _unknown:
+            raise AssertionError('%r must be known' % self._search_prefix)
         common_prefix = self._search_prefix
         split_at = len(common_prefix) + 1
         result = {}
@@ -772,7 +775,8 @@
         if self._map_no_split(key, value):
             return self._split(store)
         else:
-            assert self._search_prefix is not _unknown
+            if self._search_prefix is _unknown:
+                raise AssertionError('%r must be known' % self._search_prefix)
             return self._search_prefix, [("", self)]
 
     def serialise(self, store):

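For context, the first hunk above guards a lockstep walk over two tries: nodes are pulled off the self and basis heaps in prefix order, and at that point both heaps must yield the same prefix. A toy sketch of the invariant (hypothetical data, not bzrlib structures):

    import heapq

    self_pending = [('aa', 'self-node-1'), ('ab', 'self-node-2')]
    basis_pending = [('aa', 'basis-node-1'), ('ab', 'basis-node-2')]
    heapq.heapify(self_pending)
    heapq.heapify(basis_pending)
    while self_pending and basis_pending:
        self_prefix, self_node = heapq.heappop(self_pending)
        basis_prefix, basis_node = heapq.heappop(basis_pending)
        if self_prefix != basis_prefix:
            raise AssertionError('%r != %r' % (self_prefix, basis_prefix))
        # ... process the two nodes as a matched pair ...
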
=== modified file 'bzrlib/groupcompress.py'
--- a/bzrlib/groupcompress.py	2009-03-31 13:52:33 +0000
+++ b/bzrlib/groupcompress.py	2009-03-31 15:53:49 +0000
@@ -96,7 +96,9 @@
 
     # Group Compress Block v1 Zlib
     GCB_HEADER = 'gcb1z\n'
+    # Group Compress Block v1 Lzma
     GCB_LZ_HEADER = 'gcb1l\n'
+    GCB_KNOWN_HEADERS = (GCB_HEADER, GCB_LZ_HEADER)
 
     def __init__(self):
         # map by key? or just order in file?
@@ -128,18 +130,22 @@
         #       _z_content because of this.
         if num_bytes is None:
             num_bytes = self._content_length
-        if self._content_length is not None:
-            assert num_bytes <= self._content_length
+        elif (self._content_length is not None
+              and num_bytes > self._content_length):
+            raise AssertionError(
+                'requested num_bytes (%d) > content length (%d)'
+                % (num_bytes, self._content_length))
+        # Expand the content if required
         if self._content is None:
-            assert self._z_content is not None
+            if self._z_content is None:
+                raise AssertionError('No content to decompress')
             if self._z_content == '':
                 self._content = ''
             elif self._compressor_name == 'lzma':
                 # We don't do partial lzma decomp yet
                 self._content = pylzma.decompress(self._z_content)
-            else:
+            elif self._compressor_name == 'zlib':
                 # Start a zlib decompressor
-                assert self._compressor_name == 'zlib'
                 if num_bytes is None:
                     self._content = zlib.decompress(self._z_content)
                 else:
@@ -148,8 +154,12 @@
                     # that the rest of the code is simplified
                     self._content = self._z_content_decompressor.decompress(
                         self._z_content, num_bytes + _ZLIB_DECOMP_WINDOW)
-                # Any bytes remaining to be decompressed will be in the
-                # decompressors 'unconsumed_tail'
+            else:
+                raise AssertionError('Unknown compressor: %r'
+                                     % self._compressor_name)
+        # Any bytes remaining to be decompressed will be in the decompressors
+        # 'unconsumed_tail'
+
         # Do we have enough bytes already?
         if num_bytes is not None and len(self._content) >= num_bytes:
             return
@@ -157,23 +167,26 @@
             # We must have already decompressed everything
             return
         # If we got this far, and don't have a decompressor, something is wrong
-        assert self._z_content_decompressor is not None
+        if self._z_content_decompressor is None:
+            raise AssertionError(
+                'No decompressor to decompress %d bytes' % num_bytes)
         remaining_decomp = self._z_content_decompressor.unconsumed_tail
         if num_bytes is None:
             if remaining_decomp:
                 # We don't know how much is left, but we'll decompress it all
                 self._content += self._z_content_decompressor.decompress(
                     remaining_decomp)
-                # Note: There what I consider a bug in zlib.decompressobj
+                # Note: There's what I consider a bug in zlib.decompressobj
                 #       If you pass back in the entire unconsumed_tail, only
                 #       this time you don't pass a max-size, it doesn't
                 #       change the unconsumed_tail back to None/''.
                 #       However, we know we are done with the whole stream
                 self._z_content_decompressor = None
+            # XXX: Why is this the only place in this routine we set this?
             self._content_length = len(self._content)
         else:
-            # If we have nothing left to decomp, we ran out of decomp bytes
-            assert remaining_decomp
+            if not remaining_decomp:
+                raise AssertionError('Nothing left to decompress')
             needed_bytes = num_bytes - len(self._content)
             # We always set max_size to 32kB over the minimum needed, so that
             # zlib will give us as much as we really want.
@@ -181,7 +194,9 @@
             #       that keeps expanding the request until we get enough
             self._content += self._z_content_decompressor.decompress(
                 remaining_decomp, needed_bytes + _ZLIB_DECOMP_WINDOW)
-            assert len(self._content) >= num_bytes
+            if len(self._content) < num_bytes:
+                raise AssertionError('%d bytes wanted, only %d available'
+                                     % (num_bytes, len(self._content)))
             if not self._z_content_decompressor.unconsumed_tail:
                 # The stream is finished
                 self._z_content_decompressor = None
@@ -202,15 +217,19 @@
         pos2 = bytes.index('\n', pos, pos + 14)
         self._content_length = int(bytes[pos:pos2])
         pos = pos2 + 1
-        assert len(bytes) == (pos + self._z_content_length)
+        if len(bytes) != (pos + self._z_content_length):
+            # XXX: Define some GCCorrupt error?
+            raise AssertionError('Invalid bytes: (%d) != %d + %d' %
+                                 (len(bytes), pos, self._z_content_length))
         self._z_content = bytes[pos:]
-        assert len(self._z_content) == self._z_content_length
 
     @classmethod
     def from_bytes(cls, bytes):
         out = cls()
-        if bytes[:6] not in (cls.GCB_HEADER, cls.GCB_LZ_HEADER):
-            raise ValueError('bytes did not start with %r' % (cls.GCB_HEADER,))
+        if bytes[:6] not in cls.GCB_KNOWN_HEADERS:
+            raise ValueError('bytes did not start with any of %r'
+                             % (cls.GCB_KNOWN_HEADERS,))
+        # XXX: why not test the whole header?
         if bytes[4] == 'z':
             out._compressor_name = 'zlib'
         elif bytes[4] == 'l':
@@ -266,7 +285,8 @@
         if _USE_LZMA:
             compress = pylzma.compress
         if self._z_content is None:
-            assert self._content is not None
+            if self._content is None:
+                raise AssertionError('Nothing to compress')
             self._z_content = compress(self._content)
             self._z_content_length = len(self._z_content)
         if _USE_LZMA:
@@ -1344,8 +1364,11 @@
                     block_length = length
                 if record.storage_kind in ('groupcompress-block',
                                            'groupcompress-block-ref'):
-                    assert insert_manager is not None
-                    assert record._manager is insert_manager
+                    if insert_manager is None:
+                        raise AssertionError('No insert_manager set')
+                    if record._manager is not insert_manager:
+                        raise AssertionError(
+                            'insert_manager does not match the record')
                     value = "%d %d %d %d" % (block_start, block_length,
                                              record._start, record._end)
                     nodes = [(record.key, value, (record.parents,))]

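Most of the churn above is in _ensure_content, whose partial decompression leans on zlib.decompressobj: decompress() takes an optional max_length, and whatever compressed input was not consumed is kept in unconsumed_tail for a later call. A minimal standalone sketch of the idiom (hypothetical payload, with the same 32kB slack the code calls _ZLIB_DECOMP_WINDOW):

    import zlib

    _ZLIB_DECOMP_WINDOW = 32 * 1024

    z_content = zlib.compress('lots of content\n' * 5000)
    decomp = zlib.decompressobj()
    # Inflate roughly what is needed plus the window, not everything.
    content = decomp.decompress(z_content, 100 + _ZLIB_DECOMP_WINDOW)
    # Compressed bytes not yet inflated wait in unconsumed_tail ...
    if decomp.unconsumed_tail:
        # ... and a later call picks up exactly where we stopped.
        content += decomp.decompress(decomp.unconsumed_tail)
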
=== modified file 'bzrlib/inventory.py'
--- a/bzrlib/inventory.py	2009-03-30 11:49:32 +0000
+++ b/bzrlib/inventory.py	2009-03-31 16:08:55 +0000
@@ -1653,7 +1653,8 @@
         :return: A CHKInventory
         """
         lines = bytes.split('\n')
-        assert lines[-1] == ''
+        if lines[-1] != '':
+            raise AssertionError('bytes to deserialize must end with an eol')
         lines.pop()
         if lines[0] != 'chkinventory:':
             raise ValueError("not a serialised CHKInventory: %r" % bytes)

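The check above holds because str.split leaves a trailing empty string exactly when the text ends with the separator, so lines[-1] == '' is the same as saying the serialised form is eol-terminated. For illustration (hypothetical content):

    'chkinventory:\nsome field\n'.split('\n')
    # -> ['chkinventory:', 'some field', '']
    'chkinventory:\nsome field'.split('\n')
    # -> ['chkinventory:', 'some field']   (no trailing eol)
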
=== modified file 'bzrlib/repofmt/groupcompress_repo.py'
--- a/bzrlib/repofmt/groupcompress_repo.py	2009-03-31 10:08:59 +0000
+++ b/bzrlib/repofmt/groupcompress_repo.py	2009-03-31 16:20:47 +0000
@@ -175,7 +175,8 @@
                                           reload_func=reload_func)
         self._pack_collection = pack_collection
         # ATM, We only support this for GCCHK repositories
-        assert pack_collection.chk_index is not None
+        if pack_collection.chk_index is None:
+            raise AssertionError('pack_collection.chk_index should not be None')
         self._gather_text_refs = False
         self._chk_id_roots = []
         self._chk_p_id_roots = []
@@ -210,7 +211,8 @@
                     self._chk_id_roots.append(key)
                     id_roots_set.add(key)
                 p_id_map = chk_inv.parent_id_basename_to_file_id
-                assert p_id_map is not None
+                if p_id_map is None:
+                    raise AssertionError('Parent id -> file_id map not set')
                 key = p_id_map.key()
                 if key not in p_id_roots_set:
                     p_id_roots_set.add(key)
@@ -330,7 +332,8 @@
         access = knit._DirectPackAccess(index_to_pack)
         if for_write:
             # Use new_pack
-            assert self.new_pack is not None
+            if self.new_pack is None:
+                raise AssertionError('No new pack has been set')
             index = getattr(self.new_pack, index_name)
             index_to_pack[index] = self.new_pack.access_tuple()
             index.set_optimize(for_size=True)
@@ -720,7 +723,6 @@
         raise errors.UnsuspendableWriteGroup(self)
 
     def _reconcile_pack(self, collection, packs, extension, revs, pb):
-        # assert revs is None
         packer = GCCHKReconcilePacker(collection, packs, extension)
         return packer.pack(pb)
 

=== modified file 'bzrlib/repofmt/pack_repo.py'
--- a/bzrlib/repofmt/pack_repo.py	2009-03-31 07:44:14 +0000
+++ b/bzrlib/repofmt/pack_repo.py	2009-03-31 16:20:47 +0000
@@ -2225,7 +2225,8 @@
             for index, key, value, refs in revision_nodes:
                 node = (index, key, value, refs)
                 index_memo = self.revisions._index._node_to_position(node)
-                assert index_memo[0] == index
+                if index_memo[0] != index:
+                    raise AssertionError('%r != %r' % (index_memo[0], index))
                 index_positions.append((index_memo, key[0],
                                        tuple(parent[0] for parent in refs[0])))
                 pb.update("Reading revision index", 0, 0)

=== modified file 'bzrlib/tests/test_groupcompress.py'
--- a/bzrlib/tests/test_groupcompress.py	2009-03-31 10:08:59 +0000
+++ b/bzrlib/tests/test_groupcompress.py	2009-03-31 15:53:49 +0000
@@ -310,6 +310,11 @@
         self.assertEqual('', block._z_content)
         block._ensure_content() # Ensure content is safe to call 2x
 
+    def test_from_invalid(self):
+        self.assertRaises(ValueError,
+                          groupcompress.GroupCompressBlock.from_bytes,
+                          'this is not a valid header')
+
     def test_from_bytes(self):
         content = ('a tiny bit of content\n')
         z_content = zlib.compress(content)

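test_from_invalid drives the new ValueError path in from_bytes. A sketch of the check it exercises, reusing the header constants from the diff; branching on the whole six-byte header rather than on bytes[4], as the XXX comment suggests, would fold validation and dispatch into one comparison (hypothetical standalone function, not the bzrlib method):

    def sniff_compressor(bytes):
        header = bytes[:6]
        if header == 'gcb1z\n':    # Group Compress Block v1 Zlib
            return 'zlib'
        elif header == 'gcb1l\n':  # Group Compress Block v1 Lzma
            return 'lzma'
        raise ValueError('bytes did not start with any of %r'
                         % (('gcb1z\n', 'gcb1l\n'),))
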

