#!/bin/sh
# Test for this performance regression:
# grep-3.4 would require O(N^2) RSS for N regexps
# grep-3.5 requires O(N) in the most common cases.

# Copyright 2020-2021 Free Software Foundation, Inc.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

# Pull in the gnulib-style test framework (defines path_prepend_,
# expensive_, skip_, framework_failure_, user_time_, returns_, Exit)
# and make the just-built grep the one under test.
. "${srcdir=.}/init.sh"; path_prepend_ ../src

fail=0

# This test is susceptible to failure due to differences in
# system load during the two test runs, so we'll mark it as
# "expensive", making it less likely to be run by regular users.
expensive_

# Make the quick/small input large enough so that even on high-end
# systems this first invocation takes at least 10ms of user time.
word_list=/usr/share/dict/linux.words

# If $word_list does not exist, generate an input that exhibits
# similar performance characteristics.
if ! test -f "$word_list"; then
  # Generate data comparable to that word list.
  # Note how all "words" start with "a", and that there is
  # a small percentage of lines with at least one "." metachar.
  # This requires /dev/urandom, so if it's not present, skip
  # this test.  If desperate, we could fall back to using
  # tar+compressed lib/*.c as the data source.
  test -r /dev/urandom \
    || skip_ 'this system has neither word list nor working /dev/urandom'
  word_list=word_list
  # Build ~8-char "words": strip non-word bytes from urandom, break the
  # stream into 8-byte lines, seed occasional "." metachars, force each
  # line to start with "a", then sort.
  ( echo a; cat /dev/urandom \
      | LC_ALL=C tr -dc 'a-zA-Z0-9_' \
      | head -c500000 \
      | sed 's/\(........\)/\1\n/g' \
      | sed s/rs/./ \
      | sed s/./a/ \
      | sort \
  ) > "$word_list"
fi

# Calibrate: grow the small input until the baseline run takes at
# least 10ms of user time, so the later ratio is meaningful.
n_lines=2000
while :; do
  sed "${n_lines}q" < "$word_list" > in || framework_failure_
  small_ms=$(LC_ALL=C user_time_ 1 grep --file=in -v in) || fail=1
  test "$small_ms" -ge 10 && break
  n_lines=$(expr "$n_lines" + 2000)
done

# Now, run it again, but with 20 times as many lines.
n_lines=$(expr "$n_lines" \* 20)
sed "${n_lines}q" < "$word_list" > in || framework_failure_
large_ms=$(LC_ALL=C user_time_ 1 grep --file=in -v in) || fail=1

# Deliberately recording in an unused variable so it
# shows up in set -x output, in case this test fails.
ratio=$(expr "$large_ms" / "$small_ms")

# The duration of the larger run must be no more than 60 times
# that of the small one.  Using recent versions prior to this fix,
# this test would fail due to ratios larger than 300.  Using the
# fixed version, it's common to see a ratio of 20-30.
# (expr exits 0 when the comparison is true, so require status 1.)
returns_ 1 expr "$small_ms" '<' "$large_ms" / 60 || fail=1

Exit $fail