Commit 1e411c5c authored by Martin Panter

Issue #27130: Fix handling of buffers exceeding (U)INT_MAX in “zlib” module

Ported from patches by Xiang Zhang, Nadeem Vawda, and myself.
parent 2955ef75
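
Before the diff itself, a minimal sketch of the behaviour this commit establishes, assuming a 64-bit build with the fix applied (the sample data here is illustrative, not taken from the test suite): size and length arguments up to sys.maxsize are accepted and clamped internally to zlib's unsigned-int limit, while anything larger raises OverflowError.

    import sys
    import zlib

    data = b"spam" * 1000
    compressed = zlib.compress(data, 1)

    # max_length up to sys.maxsize is accepted; the module clamps it to
    # zlib's internal unsigned-int limit instead of failing on 64-bit builds.
    dco = zlib.decompressobj()
    assert dco.decompress(compressed, sys.maxsize) == data

    # Values that do not fit in a Py_ssize_t raise a clean OverflowError
    # (on CPython the message matches 'int too large').
    try:
        zlib.decompress(compressed, 15, sys.maxsize + 1)
    except OverflowError:
        pass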
import unittest
from test import test_support as support
from test.test_support import TESTFN, run_unittest, import_module, unlink, requires
import binascii
import pickle
@@ -80,6 +81,16 @@ class ChecksumTestCase(unittest.TestCase):
                         zlib.crc32('spam', (2**31)))

# Issue #10276 - check that inputs >=4GB are handled correctly.
class ChecksumBigBufferTestCase(unittest.TestCase):

    @precisionbigmemtest(size=_4G + 4, memuse=1, dry_run=False)
    def test_big_buffer(self, size):
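        # "nyan" is 4 bytes, so _1G + 1 repetitions make 4 GiB + 4 bytes,
        # just past the 32-bit boundary; the expected CRC-32 and Adler-32
        # values are for this exact input.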
data = b"nyan" * (_1G + 1)
self.assertEqual(zlib.crc32(data) & 0xFFFFFFFF, 1044521549)
self.assertEqual(zlib.adler32(data) & 0xFFFFFFFF, 2256789997)

class ExceptionTestCase(unittest.TestCase):
    # make sure we generate some expected errors
    def test_badlevel(self):
@@ -104,6 +115,15 @@ class ExceptionTestCase(unittest.TestCase):
        self.assertRaises(ValueError, zlib.decompressobj().flush, 0)
        self.assertRaises(ValueError, zlib.decompressobj().flush, -1)

    @support.cpython_only
    def test_overflow(self):
        with self.assertRaisesRegexp(OverflowError, 'int too large'):
            zlib.decompress(b'', 15, sys.maxsize + 1)
        with self.assertRaisesRegexp(OverflowError, 'int too large'):
            zlib.decompressobj().decompress(b'', sys.maxsize + 1)
        with self.assertRaisesRegexp(OverflowError, 'int too large'):
            zlib.decompressobj().flush(sys.maxsize + 1)

class BaseCompressTestCase(object):
    def check_big_compress_buffer(self, size, compress_func):
@@ -167,6 +187,28 @@ class CompressTestCase(BaseCompressTestCase, unittest.TestCase):
    def test_big_decompress_buffer(self, size):
        self.check_big_decompress_buffer(size, zlib.decompress)

    @precisionbigmemtest(size=_4G, memuse=1)
    def test_large_bufsize(self, size):
        # Test decompress(bufsize) parameter greater than the internal limit
        data = HAMLET_SCENE * 10
        compressed = zlib.compress(data, 1)
        self.assertEqual(zlib.decompress(compressed, 15, size), data)

    def test_custom_bufsize(self):
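        # bufsize need not be an int; anything whose __int__ works is
        # accepted (CustomInt, defined below, converts to 100).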
        data = HAMLET_SCENE * 10
        compressed = zlib.compress(data, 1)
        self.assertEqual(zlib.decompress(compressed, 15, CustomInt()), data)

    @unittest.skipUnless(sys.maxsize > 2**32, 'requires 64bit platform')
    @precisionbigmemtest(size=_4G + 100, memuse=4)
    def test_64bit_compress(self, size):
        data = b'x' * size
        try:
            comp = zlib.compress(data, 0)
            self.assertEqual(zlib.decompress(comp), data)
        finally:
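            # Drop references promptly so the multi-gigabyte buffers are
            # freed before the next big-memory test runs.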
            comp = data = None

class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
    # Test compression object
@@ -318,6 +360,22 @@ class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
        self.assertRaises(ValueError, dco.decompress, "", -1)
        self.assertEqual('', dco.unconsumed_tail)

    def test_maxlen_large(self):
        # Sizes up to sys.maxsize should be accepted, although zlib is
        # internally limited to expressing sizes with unsigned int
        data = HAMLET_SCENE * 10
        DEFAULTALLOC = 16 * 1024
        self.assertGreater(len(data), DEFAULTALLOC)
        compressed = zlib.compress(data, 1)
        dco = zlib.decompressobj()
        self.assertEqual(dco.decompress(compressed, sys.maxsize), data)

    def test_maxlen_custom(self):
        data = HAMLET_SCENE * 10
        compressed = zlib.compress(data, 1)
        dco = zlib.decompressobj()
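        # CustomInt converts to 100 via __int__, so max_length is 100 and
        # only the first 100 bytes are returned.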
        self.assertEqual(dco.decompress(compressed, CustomInt()), data[:100])

    def test_clear_unconsumed_tail(self):
        # Issue #12050: calling decompress() without providing max_length
        # should clear the unconsumed_tail attribute.
@@ -416,6 +474,22 @@ class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
        data = zlib.compress(input2)
        self.assertEqual(dco.flush(), input1[1:])

    @precisionbigmemtest(size=_4G, memuse=1)
    def test_flush_large_length(self, size):
        # Test flush(length) parameter greater than internal limit UINT_MAX
        input = HAMLET_SCENE * 10
        data = zlib.compress(input, 1)
        dco = zlib.decompressobj()
        dco.decompress(data, 1)
        self.assertEqual(dco.flush(size), input[1:])

    def test_flush_custom_length(self):
        input = HAMLET_SCENE * 10
        data = zlib.compress(input, 1)
        dco = zlib.decompressobj()
        dco.decompress(data, 1)
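        # flush() also converts its length argument with __int__; length is
        # only an initial buffer size, so the full remainder is returned.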
        self.assertEqual(dco.flush(CustomInt()), input[1:])

    @requires_Compress_copy
    def test_compresscopy(self):
        # Test copying a compression object
@@ -527,6 +601,46 @@ class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
        decompress = lambda s: d.decompress(s) + d.flush()
        self.check_big_decompress_buffer(size, decompress)

    @unittest.skipUnless(sys.maxsize > 2**32, 'requires 64bit platform')
    @precisionbigmemtest(size=_4G + 100, memuse=4)
    def test_64bit_compress(self, size):
        data = b'x' * size
        co = zlib.compressobj(0)
        do = zlib.decompressobj()
        try:
            comp = co.compress(data) + co.flush()
            uncomp = do.decompress(comp) + do.flush()
            self.assertEqual(uncomp, data)
        finally:
            comp = uncomp = data = None

    @unittest.skipUnless(sys.maxsize > 2**32, 'requires 64bit platform')
    @precisionbigmemtest(size=_4G + 100, memuse=3)
    def test_large_unused_data(self, size):
        data = b'abcdefghijklmnop'
        unused = b'x' * size
        comp = zlib.compress(data) + unused
        do = zlib.decompressobj()
        try:
            uncomp = do.decompress(comp) + do.flush()
            self.assertEqual(unused, do.unused_data)
            self.assertEqual(uncomp, data)
        finally:
            unused = comp = do = None

    @unittest.skipUnless(sys.maxsize > 2**32, 'requires 64bit platform')
    @precisionbigmemtest(size=_4G + 100, memuse=5)
    def test_large_unconsumed_tail(self, size):
        data = b'x' * size
        do = zlib.decompressobj()
        try:
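            # Level 0 stores the data uncompressed, so comp is slightly
            # larger than 4 GiB; decompressing with max_length=1 forces
            # almost all of it through unconsumed_tail before flush().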
            comp = zlib.compress(data, 0)
            uncomp = do.decompress(comp, 1) + do.flush()
            self.assertEqual(uncomp, data)
            self.assertEqual(do.unconsumed_tail, b'')
        finally:
            comp = uncomp = data = None

    def test_wbits(self):
        co = zlib.compressobj(1, zlib.DEFLATED, 15)
        zlib15 = co.compress(HAMLET_SCENE) + co.flush()
@@ -658,9 +772,15 @@ LAERTES
"""
class CustomInt:
    def __int__(self):
        return 100

def test_main():
    run_unittest(
        ChecksumTestCase,
        ChecksumBigBufferTestCase,
        ExceptionTestCase,
        CompressTestCase,
        CompressObjectTestCase
...
@@ -26,6 +26,11 @@ Core and Builtins
Library
-------
- Issue #27130: In the "zlib" module, fix handling of large buffers
(typically 2 or 4 GiB). Previously, inputs were limited to 2 GiB, and
compression and decompression operations did not properly handle results of
2 or 4 GiB.
- Issue #23804: Fix SSL zero-length recv() calls to not block and not raise
  an error about unclean EOF.
...