Rev 2893: Review feedback and discussion with Martin - split out the readv offset adjustment into a new helper and document where the design might/should go next. in http://people.ubuntu.com/~robertc/baz2.0/readv

Robert Collins robertc at robertcollins.net
Tue Oct 9 02:59:57 BST 2007


At http://people.ubuntu.com/~robertc/baz2.0/readv

------------------------------------------------------------
revno: 2893
revision-id: robertc at robertcollins.net-20071009015950-oiq91zspjpoeiz6t
parent: robertc at robertcollins.net-20071008044749-07yl1rtr3v9iw62o
committer: Robert Collins <robertc at robertcollins.net>
branch nick: readv
timestamp: Tue 2007-10-09 11:59:50 +1000
message:
  Review feedback and discussion with Martin - split out the readv offset adjustment into a new helper and document where the design might/should go next.
modified:
  bzrlib/transport/__init__.py   transport.py-20050711165921-4978aa7ce1285ad5
=== modified file 'bzrlib/transport/__init__.py'
--- a/bzrlib/transport/__init__.py	2007-10-08 04:47:49 +0000
+++ b/bzrlib/transport/__init__.py	2007-10-09 01:59:50 +0000
@@ -662,54 +662,16 @@
         :return: A list or generator of (offset, data) tuples
         """
         if adjust_for_latency:
-            offsets = sorted(offsets)
-            # short circuit empty requests
-            if len(offsets) == 0:
-                def empty_yielder():
-                    # Quick thunk to stop this function becoming a generator
-                    # itself, rather we return a generator that has nothing to
-                    # yield.
-                    if False:
-                        yield None
-                return empty_yielder()
-            # expand by page size at either end
-            maximum_expansion = self.recommended_page_size()
-            new_offsets = []
-            for offset, length in offsets:
-                expansion = maximum_expansion - length
-                if expansion < 0:
-                    # we're asking for more than the minimum read anyway.
-                    expansion = 0
-                reduction = expansion / 2
-                new_offset = offset - reduction
-                new_length = length + expansion
-                if new_offset < 0:
-                    # don't ask for anything < 0
-                    new_offset = 0
-                if (upper_limit is not None and
-                    new_offset + new_length > upper_limit):
-                    new_length = upper_limit - new_offset
-                new_offsets.append((new_offset, new_length))
-            # combine the expanded offsets
-            offsets = []
-            current_offset, current_length = new_offsets[0]
-            current_finish = current_length + current_offset
-            for offset, length in new_offsets[1:]:
-                finish = offset + length
-                if offset > current_finish:
-                    # there is a gap, output the current accumulator and start
-                    # a new one for the region we're examining.
-                    offsets.append((current_offset, current_length))
-                    current_offset = offset
-                    current_length = length
-                    current_finish = finish
-                    continue
-                if finish > current_finish:
-                    # extend the current accumulator to the end of the region
-                    # we're examining.
-                    current_finish = finish
-                    current_length = finish - current_offset
-            offsets.append((current_offset, current_length))
+            # Design note: We may wish to have different algorithms for the
+            # expansion of the offsets per-transport. E.g. for local disk to
+            # use page-aligned expansion. If so, consider the following structure:
+            #  - a test that transport.readv uses self._offset_expander (or some similar attribute) to do the expansion
+            #  - a test for each transport that it has some known-good offset expander
+            #  - unit tests for each offset expander
+            #  - a set of tests for the offset expander interface, giving
+            #    baseline behaviour (which the current transport
+            #    adjust_for_latency tests could be repurposed to provide).
+            offsets = self._sort_expand_and_combine(offsets, upper_limit)
         return self._readv(relpath, offsets)
 
     def _readv(self, relpath, offsets):
@@ -766,6 +728,65 @@
                 yield cur_offset_and_size[0], this_data
                 cur_offset_and_size = offset_stack.next()
 
+    def _sort_expand_and_combine(self, offsets, upper_limit):
+        """Helper for readv.
+
+        :param offsets: A readv vector - (offset, length) tuples.
+        :param upper_limit: The highest byte offset that may be requested.
+        :return: A readv vector that will read all the regions requested by
+            offsets, in start-to-end order, with no duplicated regions,
+            expanded by the transport's recommended page size.
+        """
+        offsets = sorted(offsets)
+        # short circuit empty requests
+        if len(offsets) == 0:
+            def empty_yielder():
+                # Quick thunk to stop this function becoming a generator
+                # itself, rather we return a generator that has nothing to
+                # yield.
+                if False:
+                    yield None
+            return empty_yielder()
+        # expand by page size at either end
+        maximum_expansion = self.recommended_page_size()
+        new_offsets = []
+        for offset, length in offsets:
+            expansion = maximum_expansion - length
+            if expansion < 0:
+                # we're asking for more than the minimum read anyway.
+                expansion = 0
+            reduction = expansion / 2
+            new_offset = offset - reduction
+            new_length = length + expansion
+            if new_offset < 0:
+                # don't ask for anything < 0
+                new_offset = 0
+            if (upper_limit is not None and
+                new_offset + new_length > upper_limit):
+                new_length = upper_limit - new_offset
+            new_offsets.append((new_offset, new_length))
+        # combine the expanded offsets
+        offsets = []
+        current_offset, current_length = new_offsets[0]
+        current_finish = current_length + current_offset
+        for offset, length in new_offsets[1:]:
+            finish = offset + length
+            if offset > current_finish:
+                # there is a gap, output the current accumulator and start
+                # a new one for the region we're examining.
+                offsets.append((current_offset, current_length))
+                current_offset = offset
+                current_length = length
+                current_finish = finish
+                continue
+            if finish > current_finish:
+                # extend the current accumulator to the end of the region
+                # we're examining.
+                current_finish = finish
+                current_length = finish - current_offset
+        offsets.append((current_offset, current_length))
+        return offsets
+
     @staticmethod
     def _coalesce_offsets(offsets, limit, fudge_factor):
         """Yield coalesced offsets.



More information about the bazaar-commits mailing list