Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGES/12358.misc.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Changed ``zlib_executor_size`` default so compressed payloads are async by default -- by :user:`Dreamsorcerer`.
12 changes: 2 additions & 10 deletions aiohttp/web_response.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@

from . import hdrs, payload
from .abc import AbstractStreamWriter
from .compression_utils import ZLibCompressor
from .compression_utils import MAX_SYNC_CHUNK_SIZE, ZLibCompressor
from .helpers import (
ETAG_ANY,
QUOTED_ETAG_RE,
Expand All @@ -35,7 +35,6 @@
from .typedefs import JSONBytesEncoder, JSONEncoder, LooseHeaders

REASON_PHRASES = {http_status.value: http_status.phrase for http_status in HTTPStatus}
LARGE_BODY_SIZE = 1024**2

__all__ = (
"ContentCoding",
Expand Down Expand Up @@ -547,7 +546,7 @@ def __init__(
headers: LooseHeaders | None = None,
content_type: str | None = None,
charset: str | None = None,
zlib_executor_size: int | None = None,
zlib_executor_size: int = MAX_SYNC_CHUNK_SIZE,
zlib_executor: Executor | None = None,
) -> None:
if body is not None and text is not None:
Expand Down Expand Up @@ -726,13 +725,6 @@ async def _do_start_compression(self, coding: ContentCoding) -> None:
executor=self._zlib_executor,
)
assert self._body is not None
if self._zlib_executor_size is None and len(self._body) > LARGE_BODY_SIZE:
warnings.warn(
"Synchronous compression of large response bodies "
f"({len(self._body)} bytes) might block the async event loop. "
"Consider providing a custom value to zlib_executor_size/"
"zlib_executor response properties or disabling compression on it."
)
self._compressed_body = (
await compressor.compress(self._body) + compressor.flush()
)
Expand Down
30 changes: 30 additions & 0 deletions tests/test_benchmarks_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -504,6 +504,36 @@ def _run() -> None:
loop.run_until_complete(run_client_benchmark())


@pytest.mark.usefixtures("parametrize_zlib_backend")
def test_ten_compressed_responses_iter_chunked_1mb(
    loop: asyncio.AbstractEventLoop,
    aiohttp_client: AiohttpClient,
    benchmark: BenchmarkFixture,
) -> None:
    """Benchmark compressed GET request read via large iter_chunked."""
    chunk_size = 2**20  # read the body back in 1 MiB chunks
    payload = b"x" * 10 * chunk_size  # 10 MiB of highly compressible data

    async def handler(request: web.Request) -> web.Response:
        response = web.Response(body=payload)
        response.enable_compression()
        return response

    app = web.Application()
    app.router.add_route("GET", "/", handler)

    async def _fetch_and_drain() -> None:
        client = await aiohttp_client(app)
        resp = await client.get("/")
        # Drain the decompressed stream without keeping the chunks.
        async for _ in resp.content.iter_chunked(chunk_size):
            pass
        await client.close()

    @benchmark
    def _run() -> None:
        loop.run_until_complete(_fetch_and_drain())


def test_ten_streamed_responses_iter_chunks(
loop: asyncio.AbstractEventLoop,
aiohttp_client: AiohttpClient,
Expand Down
42 changes: 42 additions & 0 deletions tests/test_benchmarks_web_request.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
"""codspeed benchmarks for web request reading."""

import asyncio
import zlib

import pytest
from pytest_codspeed import BenchmarkFixture

from aiohttp import web
from aiohttp.pytest_plugin import AiohttpClient


@pytest.mark.usefixtures("parametrize_zlib_backend")
def test_read_compressed_post_body(
    loop: asyncio.AbstractEventLoop,
    aiohttp_client: AiohttpClient,
    benchmark: BenchmarkFixture,
) -> None:
    """Benchmark server Request.read() with a compressed POST body."""
    raw_body = b"B" * (5 * 2**20)  # 5 MiB uncompressed payload
    deflated = zlib.compress(raw_body)

    async def handler(request: web.Request) -> web.Response:
        # Server-side read triggers transparent inflate of the request body.
        received = await request.read()
        return web.Response(text=str(len(received)))

    app = web.Application(client_max_size=10 * 2**20)
    app.router.add_post("/", handler)

    async def _post_and_check() -> None:
        client = await aiohttp_client(app)
        headers = {"Content-Encoding": "deflate"}
        resp = await client.post("/", data=deflated, headers=headers)
        # The handler echoes the decompressed length; verify the round trip.
        assert int(await resp.read()) == len(raw_body)
        await client.close()

    @benchmark
    def _run() -> None:
        loop.run_until_complete(_post_and_check())
20 changes: 0 additions & 20 deletions tests/test_web_response.py
Original file line number Diff line number Diff line change
Expand Up @@ -504,26 +504,6 @@ async def test_force_compression_deflate() -> None:
assert "deflate" == resp.headers.get(hdrs.CONTENT_ENCODING)


@pytest.mark.usefixtures("parametrize_zlib_backend")
async def test_force_compression_deflate_large_payload() -> None:
    """Make sure a warning is thrown for large payloads compressed in the event loop."""
    req = make_request(
        "GET", "/", headers=CIMultiDict({hdrs.ACCEPT_ENCODING: "gzip, deflate"})
    )
    resp = web.Response(body=b"large")

    # Force deflate regardless of content negotiation.
    resp.enable_compression(web.ContentCoding.deflate)
    assert resp.compression

    # Patch LARGE_BODY_SIZE down to 2 bytes so the 5-byte body crosses the
    # threshold and prepare() emits the synchronous-compression warning.
    with (
        pytest.warns(Warning, match="Synchronous compression of large response bodies"),
        mock.patch("aiohttp.web_response.LARGE_BODY_SIZE", 2),
    ):
        msg = await resp.prepare(req)
    assert msg is not None
    assert "deflate" == resp.headers.get(hdrs.CONTENT_ENCODING)


@pytest.mark.usefixtures("parametrize_zlib_backend")
async def test_force_compression_no_accept_deflate() -> None:
req = make_request("GET", "/")
Expand Down
Loading