Fixed #24242 -- Improved efficiency of utils.text.compress_sequence()
The function no longer flushes zfile after each write, since doing so can make the gzipped streamed content larger than the original content: each flush appends a 5-6 byte type 0 (stored) block. Without the per-write flush, buf.read() may return nothing, so the generator now yields only when the buffer actually contains data. Testing shows that without the flush() calls the buffer is flushed roughly every 17 KB and the output compresses as well as if the content had been compressed as a single string.
Committed by: Tim Graham
Parent: 2730dad0d7
Commit: caa3562d5b
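
The behaviour described in the commit message can be sketched as follows. This is a minimal illustration, not the exact Django source; StreamingBuffer here is a stand-in helper for whatever in-memory buffer utils.text uses.

from gzip import GzipFile
from io import BytesIO


class StreamingBuffer(BytesIO):
    # Stand-in helper: read() drains everything written since the last read,
    # so the generator can re-yield it as one chunk.
    def read(self):
        data = self.getvalue()
        self.seek(0)
        self.truncate()
        return data


def compress_sequence(sequence):
    buf = StreamingBuffer()
    with GzipFile(mode='wb', fileobj=buf) as zfile:
        # GzipFile writes the gzip header on construction; pass it through.
        yield buf.read()
        for item in sequence:
            zfile.write(item)
            # No zfile.flush() here: flushing after every write would append
            # a 5-6 byte empty block each time and can inflate small payloads.
            data = buf.read()
            if data:
                # Without the per-write flush the buffer is often empty, so
                # yield only when the compressor has actually emitted bytes.
                yield data
    # Closing the GzipFile pushes the remaining compressed data and the
    # gzip trailer into the buffer.
    yield buf.read()
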
@@ -1,6 +1,8 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
+import json
+
 from django.test import SimpleTestCase
 from django.utils import six, text
 from django.utils.encoding import force_text
@@ -192,3 +194,12 @@ class TestUtilsText(SimpleTestCase):
     def test_get_valid_filename(self):
         filename = "^&'@{}[],$=!-#()%+~_123.txt"
         self.assertEqual(text.get_valid_filename(filename), "-_123.txt")
+
+    def test_compress_sequence(self):
+        data = [{'key': i} for i in range(10)]
+        seq = list(json.JSONEncoder().iterencode(data))
+        seq = [s.encode('utf-8') for s in seq]
+        actual_length = len(b''.join(seq))
+        out = text.compress_sequence(seq)
+        compressed_length = len(b''.join(out))
+        self.assertTrue(compressed_length < actual_length)
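
For illustration, the size inflation the commit message attributes to per-write flushes can be reproduced with zlib alone. This is a standalone sketch, not Django code; the chunk contents and compression level 6 are arbitrary choices.

import zlib

# Many small JSON-ish chunks, similar in spirit to the streamed content above.
chunks = [('{"key": %d}' % i).encode('utf-8') for i in range(1000)]


def gzip_size(flush_each_write):
    # wbits=16+MAX_WBITS makes the compressor emit a gzip container.
    comp = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
    out = b''
    for chunk in chunks:
        out += comp.compress(chunk)
        if flush_each_write:
            # Each Z_SYNC_FLUSH appends an empty stored (type 0) block of
            # roughly 5 bytes, on top of forcing out a block per tiny write.
            out += comp.flush(zlib.Z_SYNC_FLUSH)
    out += comp.flush()
    return len(out)


print('uncompressed:     ', sum(len(c) for c in chunks))
print('flush per write:  ', gzip_size(True))
print('flush at end only:', gzip_size(False))
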