Rev 4779: Cleanup some code paths. Make _check_key a helper that can be used in http://bazaar.launchpad.net/~jameinel/bzr/2.1-static-tuple-chk-map

John Arbash Meinel john at arbash-meinel.com
Wed Oct 21 22:20:00 BST 2009


At http://bazaar.launchpad.net/~jameinel/bzr/2.1-static-tuple-chk-map

------------------------------------------------------------
revno: 4779
revision-id: john at arbash-meinel.com-20091021211941-3kldzti7r77q9q74
parent: john at arbash-meinel.com-20091021205321-j62yud7d5lfx2za1
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: 2.1-static-tuple-chk-map
timestamp: Wed 2009-10-21 16:19:41 -0500
message:
  Cleanup some code paths. Make _check_key a helper that can be used
  but isn't currently used.
-------------- next part --------------
=== modified file 'bzrlib/chk_map.py'
--- a/bzrlib/chk_map.py	2009-10-21 18:59:47 +0000
+++ b/bzrlib/chk_map.py	2009-10-21 21:19:41 +0000
@@ -222,7 +222,9 @@
         root_key = klass._create_directly(store, initial_value,
             maximum_size=maximum_size, key_width=key_width,
             search_key_func=search_key_func)
-        assert type(root_key) is StaticTuple
+        if type(root_key) is not StaticTuple:
+            raise AssertionError('we got a %s instead of a StaticTuple'
+                                 % (type(root_key),))
         return root_key
 
     @classmethod
@@ -263,11 +265,7 @@
             for split, subnode in node_details:
                 node.add_node(split, subnode)
         keys = list(node.serialise(store))
-        root_node = keys[-1]
-        assert (type(root_node) is StaticTuple
-                and len(root_node) == 1 and
-                type(root_node[0]) is str)
-        return root_node
+        return keys[-1]
 
     def iter_changes(self, basis):
         """Iterate over the changes between basis and self.
@@ -493,7 +491,6 @@
     def iteritems(self, key_filter=None):
         """Iterate over the entire CHKMap's contents."""
         self._ensure_root()
-        # TODO: StaticTuple Barrier here
         if key_filter is not None:
             as_st = StaticTuple.from_sequence
             key_filter = [as_st(key) for key in key_filter]
@@ -502,7 +499,6 @@
     def key(self):
         """Return the key for this map."""
         if type(self._root_node) is StaticTuple:
-            _check_key(self._root_node)
             return self._root_node
         else:
             return self._root_node._key
@@ -536,7 +532,6 @@
         if type(node) is tuple:
             node = StaticTuple.from_sequence(node)
         if type(node) is StaticTuple:
-            _check_key(node)
             return node
         else:
             return node._key
@@ -567,7 +562,6 @@
             # Already saved.
             return self._root_node
         keys = list(self._root_node.serialise(self._store))
-        assert type(keys[-1]) is StaticTuple
         return keys[-1]
 
 
@@ -1025,8 +1019,8 @@
         :return: An InternalNode instance.
         """
         if type(key) is not StaticTuple:
-            import pdb; pdb.set_trace()
-        key = StaticTuple.from_sequence(key).intern()
+            raise AssertionError('deserialise should be called with a'
+                                 ' StaticTuple not %s' % (type(key),))
         return _deserialise_internal_node(bytes, key,
                                           search_key_func=search_key_func)
 
@@ -1460,6 +1454,12 @@
 
     def __init__(self, store, new_root_keys, old_root_keys,
                  search_key_func, pb=None):
+        # TODO: Should we add a StaticTuple barrier here? It would be nice to
+        #       force callers to use StaticTuple, because there will often be
+        #       lots of keys passed in here. And even if we cast it locally,
+        #       that just means that we will have *both* a StaticTuple and a
+        #       tuple() in memory, referring to the same object. (so a net
+        #       increase in memory, not a decrease.)
         self._store = store
         self._new_root_keys = new_root_keys
         self._old_root_keys = old_root_keys
@@ -1684,7 +1684,13 @@
 search_key_registry.register('hash-16-way', _search_key_16)
 search_key_registry.register('hash-255-way', _search_key_255)
 
+
 def _check_key(key):
+    """Helper function to assert that a key is properly formatted.
+
+    This generally shouldn't be used in production code, but it can be helpful
+    to debug problems.
+    """
     if type(key) is not StaticTuple:
         raise TypeError('key %r is not StaticTuple but %s' % (key, type(key)))
     if len(key) != 1:

=== modified file 'bzrlib/inventory.py'
--- a/bzrlib/inventory.py	2009-10-21 17:02:47 +0000
+++ b/bzrlib/inventory.py	2009-10-21 21:19:41 +0000
@@ -1614,7 +1614,7 @@
         while directories_to_expand:
             # Expand directories by looking in the
             # parent_id_basename_to_file_id map
-            keys = [StaticTuple(f,) for f in directories_to_expand]
+            keys = [StaticTuple(f,).intern() for f in directories_to_expand]
             directories_to_expand = set()
             items = self.parent_id_basename_to_file_id.iteritems(keys)
             next_file_ids = set([item[1] for item in items])
@@ -1814,8 +1814,7 @@
                 # Update caches. It's worth doing this whether
                 # we're propagating the old caches or not.
                 result._path_to_fileid_cache[new_path] = file_id
-                if entry.parent_id is not None:
-                    parents.add((split(new_path)[0], entry.parent_id))
+                parents.add((split(new_path)[0], entry.parent_id))
             if old_path is None:
                 old_key = None
             else:
@@ -1825,8 +1824,7 @@
                         "Entry was at wrong other path %r." %
                         self.id2path(file_id))
                 altered.add(file_id)
-            # TODO: use a StaticTuple here, though a value may be None
-            id_to_entry_delta.append((old_key, new_key, new_value))
+            id_to_entry_delta.append(StaticTuple(old_key, new_key, new_value))
             if result.parent_id_basename_to_file_id is not None:
                 # parent_id, basename changes
                 if old_path is None:
@@ -2033,7 +2031,7 @@
                 remaining.append(file_id)
             else:
                 result.append(entry)
-        file_keys = [StaticTuple(f,) for f in remaining]
+        file_keys = [StaticTuple(f,).intern() for f in remaining]
         for file_key, value in self.id_to_entry.iteritems(file_keys):
             entry = self._bytes_to_entry(value)
             result.append(entry)

=== modified file 'bzrlib/repofmt/groupcompress_repo.py'
--- a/bzrlib/repofmt/groupcompress_repo.py	2009-10-20 22:13:23 +0000
+++ b/bzrlib/repofmt/groupcompress_repo.py	2009-10-21 21:19:41 +0000
@@ -55,7 +55,6 @@
     )
 from bzrlib.static_tuple import StaticTuple
 
-_check_key = chk_map._check_key
 
 class GCPack(NewPack):
 
@@ -816,11 +815,11 @@
                                  ' no new_path %r' % (file_id,))
             if new_path == '':
                 new_inv.root_id = file_id
-                parent_id_basename_key = StaticTuple('', '')
+                parent_id_basename_key = StaticTuple('', '').intern()
             else:
                 utf8_entry_name = entry.name.encode('utf-8')
                 parent_id_basename_key = StaticTuple(entry.parent_id,
-                                                     utf8_entry_name)
+                                                     utf8_entry_name).intern()
             new_value = entry_to_bytes(entry)
             # Populate Caches?
             # new_inv._path_to_fileid_cache[new_path] = file_id
@@ -1174,8 +1173,6 @@
     for inv in repo.iter_inventories(inventory_ids, 'unordered'):
         root_key = inv.id_to_entry.key()
         pid_root_key = inv.parent_id_basename_to_file_id.key()
-        _check_key(root_key)
-        _check_key(pid_root_key)
         if inv.revision_id in parent_only_inv_ids:
             result.uninteresting_root_keys.add(root_key)
             result.uninteresting_pid_root_keys.add(pid_root_key)



More information about the bazaar-commits mailing list