扫码登录,获取cookies

This commit is contained in:
2026-03-09 16:10:29 +08:00
parent 754e720ba7
commit 8229208165
7775 changed files with 1150053 additions and 208 deletions

View File

@@ -0,0 +1,16 @@
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
from hypothesis.internal.conjecture.shrinking.floats import Float
from hypothesis.internal.conjecture.shrinking.integer import Integer
from hypothesis.internal.conjecture.shrinking.lexical import Lexical
from hypothesis.internal.conjecture.shrinking.ordering import Ordering
# Public shrinker classes re-exported as the API of this subpackage.
__all__ = ["Lexical", "Integer", "Ordering", "Float"]

View File

@@ -0,0 +1,175 @@
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""This module implements various useful common functions for shrinking tasks."""
class Shrinker:
    """A Shrinker object manages a single value and a predicate it should
    satisfy, and attempts to improve it in some direction, making it smaller
    and simpler."""
    def __init__(
        self,
        initial,
        predicate,
        random,
        *,
        full=False,
        debug=False,
        name=None,
        **kwargs,
    ):
        # setup() runs first so subclasses can consume their own keyword
        # arguments before the generic state below is initialised. Note that
        # anything setup() assigns to ``debugging_enabled`` is overwritten by
        # the ``debug`` argument at the end of this method.
        self.setup(**kwargs)
        self.current = self.make_immutable(initial)
        self.initial = self.current
        self.random = random
        self.full = full
        self.changes = 0
        self.name = name
        self.__predicate = predicate
        self.__seen = set()
        self.debugging_enabled = debug
    @property
    def calls(self):
        # Each distinct candidate value is evaluated at most once (guarded by
        # the seen-set in incorporate), so its size is the call count.
        return len(self.__seen)
    def __repr__(self):
        return "{}({}initial={!r}, current={!r})".format(
            type(self).__name__,
            "" if self.name is None else f"{self.name!r}, ",
            self.initial,
            self.current,
        )
    def setup(self, **kwargs):
        """Runs initial setup code.
        Convenience function for children that doesn't require messing
        with the signature of init.
        """
    def delegate(self, other_class, convert_to, convert_from, **kwargs):
        """Delegates shrinking to another shrinker class, by converting the
        current value to and from it with provided functions."""
        self.call_shrinker(
            other_class,
            convert_to(self.current),
            lambda v: self.consider(convert_from(v)),
            **kwargs,
        )
    def call_shrinker(self, other_class, initial, predicate, **kwargs):
        """Calls another shrinker class, passing through the relevant context
        variables.
        Note we explicitly do not pass through full.
        """
        return other_class.shrink(initial, predicate, random=self.random, **kwargs)
    def debug(self, *args):
        # Prints only when the shrinker was constructed with debug=True.
        if self.debugging_enabled:
            print("DEBUG", self, *args)
    @classmethod
    def shrink(cls, initial, predicate, **kwargs):
        """Shrink the value ``initial`` subject to the constraint that it
        satisfies ``predicate``.
        Returns the shrunk value.
        """
        shrinker = cls(initial, predicate, **kwargs)
        shrinker.run()
        return shrinker.current
    def run(self):
        """Run for an appropriate number of steps to improve the current value.
        If self.full is True, will run until no further improvements can
        be found.
        """
        if self.short_circuit():
            return
        if self.full:
            # Keep stepping until a whole pass makes no successful changes.
            prev = -1
            while self.changes != prev:
                prev = self.changes
                self.run_step()
        else:
            self.run_step()
        self.debug("COMPLETE")
    def incorporate(self, value):
        """Try using ``value`` as a possible candidate improvement.
        Return True if it works.
        """
        value = self.make_immutable(value)
        self.check_invariants(value)
        if not self.left_is_better(value, self.current):
            # ``value == value`` is False for NaN-like values, for which the
            # debug message below would be misleading.
            if value != self.current and (value == value):
                self.debug(f"Rejected {value!r} as worse than {self.current=}")
            return False
        if value in self.__seen:
            return False
        self.__seen.add(value)
        if self.__predicate(value):
            self.debug(f"shrinking to {value!r}")
            self.changes += 1
            self.current = value
            return True
        return False
    def consider(self, value):
        """Returns True if make_immutable(value) == self.current after calling
        self.incorporate(value)."""
        value = self.make_immutable(value)
        if value == self.current:
            return True
        return self.incorporate(value)
    def make_immutable(self, value):
        """Convert value into an immutable (and hashable) representation of
        itself.
        It is these immutable versions that the shrinker will work on.
        Defaults to just returning the value.
        """
        return value
    def check_invariants(self, value):
        """Make appropriate assertions about the value to ensure that it is
        valid for this shrinker.
        Abstract: subclasses must implement this; it is called on every
        candidate passed to ``incorporate``.
        """
        raise NotImplementedError
    def short_circuit(self):
        """Possibly attempt to do some shrinking.
        If this returns True, the ``run`` method will terminate early
        without doing any more work.
        """
        raise NotImplementedError
    def left_is_better(self, left, right):
        """Returns True if the left is strictly simpler than the right
        according to the standards of this shrinker."""
        raise NotImplementedError
    def run_step(self):
        """Run a single step of the main shrink loop, attempting to improve the
        current value."""
        raise NotImplementedError

View File

@@ -0,0 +1,338 @@
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""
This is a module for learning new DFAs that help normalize test
functions. That is, given a test function that sometimes shrinks
to one thing and sometimes another, this module is designed to
help learn new DFA-based shrink passes that will cause it to
always shrink to the same thing.
"""
import hashlib
import math
from itertools import islice
from pathlib import Path
from hypothesis import HealthCheck, settings
from hypothesis.errors import HypothesisException
from hypothesis.internal.conjecture.data import ConjectureResult, Status
from hypothesis.internal.conjecture.dfa.lstar import LStar
from hypothesis.internal.conjecture.shrinking.learned_dfas import (
SHRINKING_DFAS,
__file__ as _learned_dfa_file,
)
# Path of the module that stores previously learned DFAs, so that
# update_learned_dfas() can rewrite it in place.
learned_dfa_file = Path(_learned_dfa_file)
class FailedToNormalise(HypothesisException):
    """Raised when ``normalize`` cannot make the test function shrink
    consistently, or is not permitted to learn the DFAs required to do so."""
def update_learned_dfas():
    """Write any modifications to the SHRINKING_DFAS dictionary
    back to the learned DFAs file."""
    original = learned_dfa_file.read_text(encoding="utf-8")
    kept = original.splitlines()
    # Everything after the marker line is regenerated from scratch below.
    marker = kept.index("# AUTOGENERATED BEGINS")
    kept = kept[: marker + 1]
    kept.extend(["", "# fmt: off", ""])
    for dfa_name in sorted(SHRINKING_DFAS):
        dfa = SHRINKING_DFAS[dfa_name]
        kept.append(f"SHRINKING_DFAS[{dfa_name!r}] = {dfa!r} # noqa: E501")
    kept.extend(["", "# fmt: on"])
    rewritten = "\n".join(kept) + "\n"
    # Only touch the file when its contents would actually change.
    if rewritten != original:
        learned_dfa_file.write_text(rewritten, encoding="utf-8")
def learn_a_new_dfa(runner, u, v, predicate):
    """Given two buffers ``u`` and ``v``, learn a DFA that will
    allow the shrinker to normalise them better. ``u`` and ``v``
    should not currently shrink to the same test case when calling
    this function."""
    # Local import to avoid a circular dependency with the shrinker module.
    from hypothesis.internal.conjecture.shrinker import dfa_replacement, sort_key
    assert predicate(runner.cached_test_function(u))
    assert predicate(runner.cached_test_function(v))
    u_shrunk = fully_shrink(runner, u, predicate)
    v_shrunk = fully_shrink(runner, v, predicate)
    # After this, u is the shortlex-smaller of the two fully shrunk buffers.
    u, v = sorted((u_shrunk.buffer, v_shrunk.buffer), key=sort_key)
    assert u != v
    assert not v.startswith(u)
    # We would like to avoid using LStar on large strings as its
    # behaviour can be quadratic or worse. In order to help achieve
    # this we peel off a common prefix and suffix of the two final
    # results and just learn the internal bit where they differ.
    #
    # This potentially reduces the length quite far if there's
    # just one tricky bit of control flow we're struggling to
    # reduce inside a strategy somewhere and the rest of the
    # test function reduces fine.
    if v.endswith(u):
        # Special case: u is entirely a suffix of v, so the differing
        # "core" of u is empty.
        prefix = b""
        suffix = u
        u_core = b""
        assert len(u) > 0
        v_core = v[: -len(u)]
    else:
        # General case: strip the longest common prefix and suffix.
        i = 0
        while u[i] == v[i]:
            i += 1
        prefix = u[:i]
        assert u.startswith(prefix)
        assert v.startswith(prefix)
        i = 1
        while u[-i] == v[-i]:
            i += 1
        # The max() guards against the suffix overlapping the prefix.
        suffix = u[max(len(prefix), len(u) + 1 - i) :]
        assert u.endswith(suffix)
        assert v.endswith(suffix)
        u_core = u[len(prefix) : len(u) - len(suffix)]
        v_core = v[len(prefix) : len(v) - len(suffix)]
    assert u == prefix + u_core + suffix, (list(u), list(v))
    assert v == prefix + v_core + suffix, (list(u), list(v))
    better = runner.cached_test_function(u)
    worse = runner.cached_test_function(v)
    allow_discards = worse.has_discards or better.has_discards
    def is_valid_core(s):
        # Membership predicate for the language we want LStar to learn:
        # cores that substitute for u_core/v_core and still satisfy
        # ``predicate`` exactly.
        if not (len(u_core) <= len(s) <= len(v_core)):
            return False
        buf = prefix + s + suffix
        result = runner.cached_test_function(buf)
        return (
            predicate(result)
            # Because we're often using this to learn strategies
            # rather than entire complex test functions, it's
            # important that our replacements are precise and
            # don't leave the rest of the test case in a weird
            # state.
            and result.buffer == buf
            # Because the shrinker is good at removing discarded
            # data, unless we need discards to allow one or both
            # of u and v to result in valid shrinks, we don't
            # count attempts that have them as valid. This will
            # cause us to match fewer strings, which will make
            # the resulting shrink pass more efficient when run
            # on test functions it wasn't really intended for.
            and (allow_discards or not result.has_discards)
        )
    assert sort_key(u_core) < sort_key(v_core)
    assert is_valid_core(u_core)
    assert is_valid_core(v_core)
    learner = LStar(is_valid_core)
    prev = -1
    # Keep teaching the learner until a full round adds no new knowledge
    # (its generation counter stops changing).
    while learner.generation != prev:
        prev = learner.generation
        learner.learn(u_core)
        learner.learn(v_core)
        # L* has a tendency to learn DFAs which wrap around to
        # the beginning. We don't want it to do that unless
        # it's accurate, so we use these as examples to show
        # check going around the DFA twice.
        learner.learn(u_core * 2)
        learner.learn(v_core * 2)
        if learner.dfa.max_length(learner.dfa.start) > len(v_core):
            # The language we learn is finite and bounded above
            # by the length of v_core. This is important in order
            # to keep our shrink passes reasonably efficient -
            # otherwise they can match far too much. So whenever
            # we learn a DFA that could match a string longer
            # than len(v_core) we fix it by finding the first
            # string longer than v_core and learning that as
            # a correction.
            x = next(learner.dfa.all_matching_strings(min_length=len(v_core) + 1))
            assert not is_valid_core(x)
            learner.learn(x)
            assert not learner.dfa.matches(x)
            assert learner.generation != prev
        else:
            # We mostly care about getting the right answer on the
            # minimal test case, but because we're doing this offline
            # anyway we might as well spend a little more time trying
            # small examples to make sure the learner gets them right.
            for x in islice(learner.dfa.all_matching_strings(), 100):
                if not is_valid_core(x):
                    learner.learn(x)
                    assert learner.generation != prev
                    break
    # We've now successfully learned a DFA that works for shrinking
    # our failed normalisation further. Canonicalise it into a concrete
    # DFA so we can save it for later.
    new_dfa = learner.dfa.canonicalise()
    assert math.isfinite(new_dfa.max_length(new_dfa.start))
    # Sanity check: a shrinker armed with only this new DFA pass must be
    # able to shrink v strictly below its current sort key.
    shrinker = runner.new_shrinker(runner.cached_test_function(v), predicate)
    assert (len(prefix), len(v) - len(suffix)) in shrinker.matching_regions(new_dfa)
    name = "tmp-dfa-" + repr(new_dfa)
    shrinker.extra_dfas[name] = new_dfa
    shrinker.fixate_shrink_passes([dfa_replacement(name)])
    assert sort_key(shrinker.buffer) < sort_key(v)
    return new_dfa
def fully_shrink(runner, test_case, predicate):
    """Repeatedly shrink ``test_case`` under ``predicate`` until the buffer
    reaches a fixed point, and return the final result."""
    if not isinstance(test_case, ConjectureResult):
        # A raw buffer was passed in; evaluate it to get a result object.
        test_case = runner.cached_test_function(test_case)
    # A single shrink() pass may stop while further progress is still
    # possible, so iterate until the buffer stops changing.
    while True:
        candidate = runner.shrink(test_case, predicate)
        if candidate.buffer == test_case.buffer:
            return test_case
        test_case = candidate
def normalize(
    base_name,
    test_function,
    *,
    required_successes=100,
    allowed_to_update=False,
    max_dfas=10,
    random=None,
):
    """Attempt to ensure that this test function successfully normalizes - i.e.
    whenever it declares a test case to be interesting, we are able
    to shrink that to the same interesting test case (which logically should
    be the shortlex minimal interesting test case, though we may not be able
    to detect if it is).
    Will run until we have seen ``required_successes`` many interesting test
    cases in a row normalize to the same value.
    If ``allowed_to_update`` is True, whenever we fail to normalize we will
    learn a new DFA-based shrink pass that allows us to make progress. Any
    learned DFAs will be written back into the learned DFA file at the end
    of this function. If ``allowed_to_update`` is False, this will raise an
    error as soon as it encounters a failure to normalize.
    Additionally, if more than ``max_dfas`` DFAs are required to normalize
    this test function, this function will raise an error - it's essentially
    designed for small patches that other shrink passes don't cover, and
    if it's learning too many patches then you need a better shrink pass
    than this can provide.
    """
    # Need import inside the function to avoid circular imports
    from hypothesis.internal.conjecture.engine import BUFFER_SIZE, ConjectureRunner
    runner = ConjectureRunner(
        test_function,
        settings=settings(database=None, suppress_health_check=list(HealthCheck)),
        ignore_limits=True,
        random=random,
    )
    seen = set()
    dfas_added = 0
    found_interesting = False
    consecutive_successes = 0
    failures_to_find_interesting = 0
    while consecutive_successes < required_successes:
        # Presumably draws a fresh random test case of up to BUFFER_SIZE
        # bytes — verify against ConjectureRunner.cached_test_function.
        attempt = runner.cached_test_function(b"", extend=BUFFER_SIZE)
        if attempt.status < Status.INTERESTING:
            failures_to_find_interesting += 1
            # Only give up on a barren test function if we've never seen an
            # interesting case at all.
            assert (
                found_interesting or failures_to_find_interesting <= 1000
            ), "Test function seems to have no interesting test cases"
            continue
        found_interesting = True
        target = attempt.interesting_origin
        def shrinking_predicate(d):
            return d.status == Status.INTERESTING and d.interesting_origin == target
        if target not in seen:
            # First sighting of this origin: shrink it once to seed
            # runner.interesting_examples, then move on.
            seen.add(target)
            runner.shrink(attempt, shrinking_predicate)
            continue
        previous = fully_shrink(
            runner, runner.interesting_examples[target], shrinking_predicate
        )
        current = fully_shrink(runner, attempt, shrinking_predicate)
        if current.buffer == previous.buffer:
            consecutive_successes += 1
            continue
        # Normalisation failed: the same origin shrank to two different
        # buffers. Either bail out or learn a DFA that bridges the gap.
        consecutive_successes = 0
        if not allowed_to_update:
            raise FailedToNormalise(
                f"Shrinker failed to normalize {previous.buffer!r} to "
                f"{current.buffer!r} and we are not allowed to learn new DFAs."
            )
        if dfas_added >= max_dfas:
            raise FailedToNormalise(
                f"Test function is too hard to learn: Added {dfas_added} "
                "DFAs and still not done."
            )
        dfas_added += 1
        new_dfa = learn_a_new_dfa(
            runner, previous.buffer, current.buffer, shrinking_predicate
        )
        name = base_name + "-" + hashlib.sha256(repr(new_dfa).encode()).hexdigest()[:10]
        # If there is a name collision this DFA should already be being
        # used for shrinking, so we should have already been able to shrink
        # v further.
        assert name not in SHRINKING_DFAS
        SHRINKING_DFAS[name] = new_dfa
    if dfas_added > 0:
        # We've learned one or more DFAs in the course of normalising, so now
        # we update the file to record those for posterity.
        update_learned_dfas()

View File

@@ -0,0 +1,90 @@
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
import math
import sys
from hypothesis.internal.conjecture.floats import float_to_lex
from hypothesis.internal.conjecture.shrinking.common import Shrinker
from hypothesis.internal.conjecture.shrinking.integer import Integer
# 2**53 is the largest magnitude at which every integer is exactly
# representable as an IEEE-754 double; above it, shrinking via the integer
# representation would be lossy.
MAX_PRECISE_INTEGER = 2**53
class Float(Shrinker):
    """Shrinks a non-negative float towards lexicographically smaller values
    (as defined by ``float_to_lex``)."""
    def setup(self):
        self.NAN = math.nan
        # NOTE(review): this assignment has no lasting effect —
        # Shrinker.__init__ calls setup() first and then overwrites
        # debugging_enabled from its ``debug`` argument.
        self.debugging_enabled = True
    def make_immutable(self, f):
        f = float(f)
        if math.isnan(f):
            # Always use the same NAN so it works properly in self.seen
            f = self.NAN
        return f
    def check_invariants(self, value):
        # We only handle positive floats because we encode the sign separately
        # anyway.
        # Written as ``not (value < 0)`` rather than ``value >= 0`` so that
        # NaN (for which both comparisons are False) also passes.
        assert not (value < 0)
    def left_is_better(self, left, right):
        # Order floats by their lexical encoding rather than numerically.
        lex1 = float_to_lex(left)
        lex2 = float_to_lex(right)
        return lex1 < lex2
    def short_circuit(self):
        # We check for a bunch of standard "large" floats. If we're currently
        # worse than them and the shrink downwards doesn't help, abort early
        # because there's not much useful we can do here.
        for g in [sys.float_info.max, math.inf, math.nan]:
            self.consider(g)
        # If we're stuck at a nasty float don't try to shrink it further.
        if not math.isfinite(self.current):
            return True
        # If it's too large to represent as an integer, bail out here. It's
        # better to try shrinking it in the main representation.
        return self.current >= MAX_PRECISE_INTEGER
    def run_step(self):
        # Finally we get to the important bit: Each of these is a small change
        # to the floating point number that corresponds to a large change in
        # the lexical representation. Trying these ensures that our floating
        # point shrink can always move past these obstacles. In particular it
        # ensures we can always move to integer boundaries and shrink past a
        # change that would require shifting the exponent while not changing
        # the float value much.
        # First, try dropping precision bits by rounding the scaled value. We
        # try values ordered from least-precise (integer) to more precise, ie.
        # approximate lexicographical order. Once we find an acceptable shrink,
        # self.consider discards the remaining attempts early and skips test
        # invocation. The loop count sets max fractional bits to keep, and is a
        # compromise between completeness and performance.
        for p in range(10):
            scaled = self.current * 2**p # note: self.current may change in loop
            for truncate in [math.floor, math.ceil]:
                self.consider(truncate(scaled) / 2**p)
        if self.consider(int(self.current)):
            self.debug("Just an integer now")
            self.delegate(Integer, convert_to=int, convert_from=float)
            return
        # Now try to minimize the top part of the fraction as an integer. This
        # basically splits the float as k + x with 0 <= x < 1 and minimizes
        # k as an integer, but without the precision issues that would have.
        m, n = self.current.as_integer_ratio()
        i, r = divmod(m, n)
        self.call_shrinker(Integer, i, lambda k: self.consider((k * n + r) / n))

View File

@@ -0,0 +1,75 @@
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
from hypothesis.internal.conjecture.junkdrawer import find_integer
from hypothesis.internal.conjecture.shrinking.common import Shrinker
"""
This module implements a shrinker for non-negative integers.
"""
class Integer(Shrinker):
    """Attempts to find a smaller integer. Guaranteed things to try ``0``,
    ``1``, ``initial - 1``, ``initial - 2``. Plenty of optimisations beyond
    that but those are the guaranteed ones.
    """
    def short_circuit(self):
        # 0 and 1 are the two smallest possible answers; accepting either
        # means we are done immediately.
        if self.consider(0) or self.consider(1):
            return True
        self.mask_high_bits()
        if self.size > 8:
            # See if we can squeeze the integer into a single byte.
            self.consider(self.current >> (self.size - 8))
            self.consider(self.current & 0xFF)
        # 2 is the only remaining value with nothing below it worth trying.
        return self.current == 2
    def check_invariants(self, value):
        # This shrinker only handles non-negative integers.
        assert value >= 0
    def left_is_better(self, left, right):
        # Plain numeric ordering: smaller is simpler.
        return left < right
    def run_step(self):
        self.shift_right()
        self.shrink_by_multiples(2)
        self.shrink_by_multiples(1)
    def shift_right(self):
        # Adaptively find how many bits we can shift off the bottom.
        start = self.current
        find_integer(lambda bits: bits <= self.size and self.consider(start >> bits))
    def mask_high_bits(self):
        # Adaptively find how many of the top bits we can clear.
        start = self.current
        width = start.bit_length()
        def drop_top(k):
            if k >= width:
                return False
            # Keep only the low (width - k) bits of the starting value.
            return self.consider(start & ((1 << (width - k)) - 1))
        find_integer(drop_top)
    @property
    def size(self):
        """Number of bits in the current value."""
        return self.current.bit_length()
    def shrink_by_multiples(self, k):
        """Adaptively subtract multiples of ``k``; True if any succeeded."""
        start = self.current
        def step_down(n):
            candidate = start - n * k
            return candidate >= 0 and self.consider(candidate)
        return find_integer(step_down) > 0

View File

@@ -0,0 +1,32 @@
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
from hypothesis.internal.conjecture.dfa import ConcreteDFA
# Maps a learned pass name (strategy label plus a hash suffix, produced by
# ``normalize``) to the ConcreteDFA used for DFA-based shrink passes.
SHRINKING_DFAS = {}
# Note: Everything below the following line is auto generated.
# Any code added after this point will be deleted by an automated
# process. Don't write code below this point.
#
# AUTOGENERATED BEGINS
# fmt: off
SHRINKING_DFAS['datetimes()-d66625c3b7'] = ConcreteDFA([[(0, 1), (1, 255, 2)], [(0, 3), (1, 255, 4)], [(0, 255, 4)], [(0, 5), (1, 255, 6)], [(0, 255, 6)], [(5, 255, 7)], [(0, 255, 7)], []], {7}) # noqa: E501
SHRINKING_DFAS['emails()-fde8f71142'] = ConcreteDFA([[(0, 1), (1, 255, 2)], [(0, 255, 2)], []], {2}) # noqa: E501
SHRINKING_DFAS['floats()-58ab5aefc9'] = ConcreteDFA([[(1, 1), (2, 255, 2)], [(1, 3)], [(0, 1, 3)], []], {3}) # noqa: E501
SHRINKING_DFAS['floats()-6b86629f89'] = ConcreteDFA([[(3, 1), (4, 255, 2)], [(1, 3)], [(0, 1, 3)], []], {3}) # noqa: E501
SHRINKING_DFAS['floats()-aa8aef1e72'] = ConcreteDFA([[(2, 1), (3, 255, 2)], [(1, 3)], [(0, 1, 3)], []], {3}) # noqa: E501
SHRINKING_DFAS['floats()-bf71ffe70f'] = ConcreteDFA([[(4, 1), (5, 255, 2)], [(1, 3)], [(0, 1, 3)], []], {3}) # noqa: E501
SHRINKING_DFAS['text()-05c917b389'] = ConcreteDFA([[(0, 1), (1, 8, 2)], [(9, 255, 3)], [(0, 255, 4)], [], [(0, 255, 5)], [(0, 255, 3)]], {3}) # noqa: E501
SHRINKING_DFAS['text()-807e5f9650'] = ConcreteDFA([[(0, 8, 1), (9, 255, 2)], [(1, 8, 3)], [(1, 8, 3)], [(0, 4)], [(0, 255, 5)], []], {2, 5}) # noqa: E501
# fmt: on

View File

@@ -0,0 +1,59 @@
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
from hypothesis.internal.compat import int_from_bytes, int_to_bytes
from hypothesis.internal.conjecture.shrinking.common import Shrinker
from hypothesis.internal.conjecture.shrinking.integer import Integer
from hypothesis.internal.conjecture.shrinking.ordering import Ordering
"""
This module implements a lexicographic minimizer for blocks of bytes.
"""
class Lexical(Shrinker):
    """Lexicographic minimizer for a fixed-length block of bytes, built by
    composing the Integer shrinker (on the bytes viewed as an integer) with
    the Ordering shrinker (partially sorting the bytes)."""
    def make_immutable(self, value):
        # Work on bytes so candidates are hashable and ordered.
        return bytes(value)
    @property
    def size(self):
        """Length in bytes of the value being shrunk; fixed throughout."""
        return len(self.current)
    def check_invariants(self, value):
        # Only the byte values may change, never the length.
        assert len(value) == self.size
    def left_is_better(self, left, right):
        # bytes compare lexicographically, which is exactly our order.
        return left < right
    def incorporate_int(self, i):
        # Map an integer candidate back onto a fixed-width byte string.
        return self.incorporate(int_to_bytes(i, self.size))
    @property
    def current_int(self):
        """The current byte string reinterpreted as an integer."""
        return int_from_bytes(self.current)
    def minimize_as_integer(self):
        # Note: self.current_int is re-read on every predicate call, so the
        # comparison always tracks the latest accepted value.
        Integer.shrink(
            self.current_int,
            lambda c: c == self.current_int or self.incorporate_int(c),
            random=self.random,
        )
    def partial_sort(self):
        Ordering.shrink(self.current, self.consider, random=self.random)
    def short_circuit(self):
        # Purely an assemblage of other shrinkers; they short-circuit for us.
        return False
    def run_step(self):
        self.minimize_as_integer()
        self.partial_sort()

View File

@@ -0,0 +1,99 @@
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
from hypothesis.internal.conjecture.junkdrawer import find_integer
from hypothesis.internal.conjecture.shrinking.common import Shrinker
def identity(v):
    """Return *v* unchanged; the default key function for Ordering."""
    return v
class Ordering(Shrinker):
    """A shrinker that tries to make a sequence more sorted.
    Will not change the length or the contents, only tries to reorder
    the elements of the sequence.
    """
    def setup(self, key=identity):
        # Comparison key for ordering elements, as with sorted().
        self.key = key
    def make_immutable(self, value):
        return tuple(value)
    def short_circuit(self):
        # If we can flat out sort the target then there's nothing more to do.
        return self.consider(sorted(self.current, key=self.key))
    def left_is_better(self, left, right):
        # Compare by key-mapped tuples, so "more sorted" means
        # lexicographically smaller under the key.
        return tuple(map(self.key, left)) < tuple(map(self.key, right))
    def check_invariants(self, value):
        # Reordering only: same length and same multiset of elements.
        assert len(value) == len(self.current)
        assert sorted(value) == sorted(self.current)
    def run_step(self):
        self.sort_regions()
        self.sort_regions_with_gaps()
    def sort_regions(self):
        """Guarantees that for each i we have tried to swap index i with
        index i + 1.
        This uses an adaptive algorithm that works by sorting contiguous
        regions starting from each element.
        """
        i = 0
        while i + 1 < len(self.current):
            prefix = list(self.current[:i])
            # find_integer grows the sorted region [i, i + k) as far as the
            # predicate keeps accepting; k >= 1 always holds because sorting
            # a region of length <= 1 reproduces the current value.
            k = find_integer(
                lambda k: i + k <= len(self.current)
                and self.consider(
                    prefix
                    + sorted(self.current[i : i + k], key=self.key)
                    + list(self.current[i + k :])
                )
            )
            i += k
    def sort_regions_with_gaps(self):
        """Guarantees that for each i we have tried to swap index i with
        index i + 2.
        This uses an adaptive algorithm that works by sorting contiguous
        regions centered on each element, where that element is treated as
        fixed and the elements around it are sorted.
        """
        for i in range(1, len(self.current) - 1):
            if self.current[i - 1] <= self.current[i] <= self.current[i + 1]:
                # The `continue` line is optimised out of the bytecode on
                # CPython >= 3.7 (https://bugs.python.org/issue2506) and on
                # PyPy, and so coverage cannot tell that it has been taken.
                continue  # pragma: no cover
            def can_sort(a, b):
                # Sort the window [a, b) around position i while keeping the
                # element at i itself fixed in place.
                if a < 0 or b > len(self.current):
                    return False
                assert a <= i < b
                split = i - a
                values = sorted(self.current[a:i] + self.current[i + 1 : b])
                return self.consider(
                    list(self.current[:a])
                    + values[:split]
                    + [self.current[i]]
                    + values[split:]
                    + list(self.current[b:])
                )
            # Adaptively extend the window right, then left.
            left = i
            right = i + 1
            right += find_integer(lambda k: can_sort(left, right + k))
            find_integer(lambda k: can_sort(left - k, right))