From 89f7f09b9a2d0dfccefabebbe9b83307133bd97c Mon Sep 17 00:00:00 2001
From: Miguel Grinberg
Date: Sat, 4 May 2019 21:16:04 +0000
Subject: [PATCH] async request and response unit tests

---
 tests/__init__.py               |   3 +
 tests/libs/uasyncio/__init__.py | 258 ++++++++++++++++++++++++++
 tests/libs/uasyncio/core.py     | 318 ++++++++++++++++++++++++++++++++
 tests/mock_socket.py            |  25 +++
 tests/test_request_async.py     |  86 +++++++++
 tests/test_response_async.py    |  86 +++++++++
 6 files changed, 776 insertions(+)
 create mode 100644 tests/libs/uasyncio/__init__.py
 create mode 100644 tests/libs/uasyncio/core.py
 create mode 100644 tests/test_request_async.py
 create mode 100644 tests/test_response_async.py

diff --git a/tests/__init__.py b/tests/__init__.py
index f953a6f..8da4730 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -2,3 +2,6 @@ from tests.test_request import TestRequest
 from tests.test_response import TestResponse
 from tests.test_url_pattern import TestURLPattern
 from tests.test_microdot import TestMicrodot
+
+from tests.test_request_async import TestRequestAsync
+from tests.test_response_async import TestResponseAsync
diff --git a/tests/libs/uasyncio/__init__.py b/tests/libs/uasyncio/__init__.py
new file mode 100644
index 0000000..41fa572
--- /dev/null
+++ b/tests/libs/uasyncio/__init__.py
@@ -0,0 +1,258 @@
+import uerrno
+import uselect as select
+import usocket as _socket
+from uasyncio.core import *
+
+
+DEBUG = 0
+log = None
+
+def set_debug(val):
+    global DEBUG, log
+    DEBUG = val
+    if val:
+        import logging
+        log = logging.getLogger("uasyncio")
+
+
+class PollEventLoop(EventLoop):
+
+    def __init__(self, runq_len=16, waitq_len=16):
+        EventLoop.__init__(self, runq_len, waitq_len)
+        self.poller = select.poll()
+        self.objmap = {}
+
+    def add_reader(self, sock, cb, *args):
+        if DEBUG and __debug__:
+            log.debug("add_reader%s", (sock, cb, args))
+        if args:
+            self.poller.register(sock, select.POLLIN)
+            self.objmap[id(sock)] = (cb, args)
+        else:
+            self.poller.register(sock, select.POLLIN)
+            self.objmap[id(sock)] = cb
+
+    def remove_reader(self, sock):
+        if DEBUG and __debug__:
+            log.debug("remove_reader(%s)", sock)
+        self.poller.unregister(sock)
+        del self.objmap[id(sock)]
+
+    def add_writer(self, sock, cb, *args):
+        if DEBUG and __debug__:
+            log.debug("add_writer%s", (sock, cb, args))
+        if args:
+            self.poller.register(sock, select.POLLOUT)
+            self.objmap[id(sock)] = (cb, args)
+        else:
+            self.poller.register(sock, select.POLLOUT)
+            self.objmap[id(sock)] = cb
+
+    def remove_writer(self, sock):
+        if DEBUG and __debug__:
+            log.debug("remove_writer(%s)", sock)
+        try:
+            self.poller.unregister(sock)
+            self.objmap.pop(id(sock), None)
+        except OSError as e:
+            # StreamWriter.awrite() first tries to write to a socket,
+            # and if that succeeds, yield IOWrite may never be called
+            # for that socket, and it will never be added to poller. So,
+            # ignore such error.
+            if e.args[0] != uerrno.ENOENT:
+                raise
+
+    def wait(self, delay):
+        if DEBUG and __debug__:
+            log.debug("poll.wait(%d)", delay)
+        # We need one-shot behavior (second arg of 1 to .poll())
+        res = self.poller.ipoll(delay, 1)
+        #log.debug("poll result: %s", res)
+        # Remove "if res" workaround after
+        # https://github.com/micropython/micropython/issues/2716 fixed.
+        if res:
+            for sock, ev in res:
+                cb = self.objmap[id(sock)]
+                if ev & (select.POLLHUP | select.POLLERR):
+                    # These events are returned even if not requested, and
+                    # are sticky, i.e. will be returned again and again.
+                    # If the caller doesn't do proper error handling and
+                    # unregister this sock, we'll busy-loop on it, so we
+                    # as well can unregister it now "just in case".
+                    self.remove_reader(sock)
+                if DEBUG and __debug__:
+                    log.debug("Calling IO callback: %r", cb)
+                if isinstance(cb, tuple):
+                    cb[0](*cb[1])
+                else:
+                    cb.pend_throw(None)
+                    self.call_soon(cb)
+
+
+class StreamReader:
+
+    def __init__(self, polls, ios=None):
+        if ios is None:
+            ios = polls
+        self.polls = polls
+        self.ios = ios
+
+    def read(self, n=-1):
+        while True:
+            yield IORead(self.polls)
+            res = self.ios.read(n)
+            if res is not None:
+                break
+            # This should not happen for real sockets, but can easily
+            # happen for stream wrappers (ssl, websockets, etc.)
+            #log.warn("Empty read")
+        if not res:
+            yield IOReadDone(self.polls)
+        return res
+
+    def readexactly(self, n):
+        buf = b""
+        while n:
+            yield IORead(self.polls)
+            res = self.ios.read(n)
+            assert res is not None
+            if not res:
+                yield IOReadDone(self.polls)
+                break
+            buf += res
+            n -= len(res)
+        return buf
+
+    def readline(self):
+        if DEBUG and __debug__:
+            log.debug("StreamReader.readline()")
+        buf = b""
+        while True:
+            yield IORead(self.polls)
+            res = self.ios.readline()
+            assert res is not None
+            if not res:
+                yield IOReadDone(self.polls)
+                break
+            buf += res
+            if buf[-1] == 0x0a:
+                break
+        if DEBUG and __debug__:
+            log.debug("StreamReader.readline(): %s", buf)
+        return buf
+
+    def aclose(self):
+        yield IOReadDone(self.polls)
+        self.ios.close()
+
+    def __repr__(self):
+        return "<StreamReader %r %r>" % (self.polls, self.ios)
+
+
+class StreamWriter:
+
+    def __init__(self, s, extra):
+        self.s = s
+        self.extra = extra
+
+    def awrite(self, buf, off=0, sz=-1):
+        # This method is called awrite (async write) to not proliferate
+        # incompatibility with original asyncio. Unlike original asyncio
+        # whose .write() method is both not a coroutine and guaranteed
+        # to return immediately (which means it has to buffer all the
+        # data), this method is a coroutine.
+        if sz == -1:
+            sz = len(buf) - off
+        if DEBUG and __debug__:
+            log.debug("StreamWriter.awrite(): spooling %d bytes", sz)
+        while True:
+            res = self.s.write(buf, off, sz)
+            # If we spooled everything, return immediately
+            if res == sz:
+                if DEBUG and __debug__:
+                    log.debug("StreamWriter.awrite(): completed spooling %d bytes", res)
+                return
+            if res is None:
+                res = 0
+            if DEBUG and __debug__:
+                log.debug("StreamWriter.awrite(): spooled partial %d bytes", res)
+            assert res < sz
+            off += res
+            sz -= res
+            yield IOWrite(self.s)
+            #assert s2.fileno() == self.s.fileno()
+            if DEBUG and __debug__:
+                log.debug("StreamWriter.awrite(): can write more")
+
+    # Write piecewise content from iterable (usually, a generator)
+    def awriteiter(self, iterable):
+        for buf in iterable:
+            yield from self.awrite(buf)
+
+    def aclose(self):
+        yield IOWriteDone(self.s)
+        self.s.close()
+
+    def get_extra_info(self, name, default=None):
+        return self.extra.get(name, default)
+
+    def __repr__(self):
+        return "<StreamWriter %r>" % self.s
+
+
+def open_connection(host, port, ssl=False):
+    if DEBUG and __debug__:
+        log.debug("open_connection(%s, %s)", host, port)
+    ai = _socket.getaddrinfo(host, port, 0, _socket.SOCK_STREAM)
+    ai = ai[0]
+    s = _socket.socket(ai[0], ai[1], ai[2])
+    s.setblocking(False)
+    try:
+        s.connect(ai[-1])
+    except OSError as e:
+        if e.args[0] != uerrno.EINPROGRESS:
+            raise
+    if DEBUG and __debug__:
+        log.debug("open_connection: After connect")
+    yield IOWrite(s)
+#    if __debug__:
+#        assert s2.fileno() == s.fileno()
+    if DEBUG and __debug__:
+        log.debug("open_connection: After iowait: %s", s)
+    if ssl:
+        print("Warning: uasyncio SSL support is alpha")
+        import ussl
+        s.setblocking(True)
+        s2 = ussl.wrap_socket(s)
+        s.setblocking(False)
+        return StreamReader(s, s2), StreamWriter(s2, {})
+    return StreamReader(s), StreamWriter(s, {})
+
+
+def start_server(client_coro, host, port, backlog=10):
+    if DEBUG and __debug__:
+        log.debug("start_server(%s, %s)", host, port)
+    ai = _socket.getaddrinfo(host, port, 0, _socket.SOCK_STREAM)
+    ai = ai[0]
+    s = _socket.socket(ai[0], ai[1], ai[2])
+    s.setblocking(False)
+
+    s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1)
+    s.bind(ai[-1])
+    s.listen(backlog)
+    while True:
+        if DEBUG and __debug__:
+            log.debug("start_server: Before accept")
+        yield IORead(s)
+        if DEBUG and __debug__:
+            log.debug("start_server: After iowait")
+        s2, client_addr = s.accept()
+        s2.setblocking(False)
+        if DEBUG and __debug__:
+            log.debug("start_server: After accept: %s", s2)
+        extra = {"peername": client_addr}
+        yield client_coro(StreamReader(s2), StreamWriter(s2, extra))
+
+
+import uasyncio.core
+uasyncio.core._event_loop_class = PollEventLoop
diff --git a/tests/libs/uasyncio/core.py b/tests/libs/uasyncio/core.py
new file mode 100644
index 0000000..95714f7
--- /dev/null
+++ b/tests/libs/uasyncio/core.py
@@ -0,0 +1,318 @@
+import utime as time
+import utimeq
+import ucollections
+
+
+type_gen = type((lambda: (yield))())
+
+DEBUG = 0
+log = None
+
+def set_debug(val):
+    global DEBUG, log
+    DEBUG = val
+    if val:
+        import logging
+        log = logging.getLogger("uasyncio.core")
+
+
+class CancelledError(Exception):
+    pass
+
+
+class TimeoutError(CancelledError):
+    pass
+
+
+class EventLoop:
+
+    def __init__(self, runq_len=16, waitq_len=16):
+        self.runq = ucollections.deque((), runq_len, True)
+        self.waitq = utimeq.utimeq(waitq_len)
+        # Current task being run. Task is a top-level coroutine scheduled
+        # in the event loop (sub-coroutines executed transparently by
+        # yield from/await, event loop "doesn't see" them).
+        self.cur_task = None
+
+    def time(self):
+        return time.ticks_ms()
+
+    def create_task(self, coro):
+        # CPython 3.4.2
+        self.call_later_ms(0, coro)
+        # CPython asyncio incompatibility: we don't return Task object
+
+    def call_soon(self, callback, *args):
+        if __debug__ and DEBUG:
+            log.debug("Scheduling in runq: %s", (callback, args))
+        self.runq.append(callback)
+        if not isinstance(callback, type_gen):
+            self.runq.append(args)
+
+    def call_later(self, delay, callback, *args):
+        self.call_at_(time.ticks_add(self.time(), int(delay * 1000)), callback, args)
+
+    def call_later_ms(self, delay, callback, *args):
+        if not delay:
+            return self.call_soon(callback, *args)
+        self.call_at_(time.ticks_add(self.time(), delay), callback, args)
+
+    def call_at_(self, time, callback, args=()):
+        if __debug__ and DEBUG:
+            log.debug("Scheduling in waitq: %s", (time, callback, args))
+        self.waitq.push(time, callback, args)
+
+    def wait(self, delay):
+        # Default wait implementation, to be overriden in subclasses
+        # with IO scheduling
+        if __debug__ and DEBUG:
+            log.debug("Sleeping for: %s", delay)
+        time.sleep_ms(delay)
+
+    def run_forever(self):
+        cur_task = [0, 0, 0]
+        while True:
+            # Expire entries in waitq and move them to runq
+            tnow = self.time()
+            while self.waitq:
+                t = self.waitq.peektime()
+                delay = time.ticks_diff(t, tnow)
+                if delay > 0:
+                    break
+                self.waitq.pop(cur_task)
+                if __debug__ and DEBUG:
+                    log.debug("Moving from waitq to runq: %s", cur_task[1])
+                self.call_soon(cur_task[1], *cur_task[2])
+
+            # Process runq
+            l = len(self.runq)
+            if __debug__ and DEBUG:
+                log.debug("Entries in runq: %d", l)
+            while l:
+                cb = self.runq.popleft()
+                l -= 1
+                args = ()
+                if not isinstance(cb, type_gen):
+                    args = self.runq.popleft()
+                    l -= 1
+                    if __debug__ and DEBUG:
+                        log.info("Next callback to run: %s", (cb, args))
+                    cb(*args)
+                    continue
+
+                if __debug__ and DEBUG:
+                    log.info("Next coroutine to run: %s", (cb, args))
+                self.cur_task = cb
+                delay = 0
+                try:
+                    if args is ():
+                        ret = next(cb)
+                    else:
+                        ret = cb.send(*args)
+                    if __debug__ and DEBUG:
+                        log.info("Coroutine %s yield result: %s", cb, ret)
+                    if isinstance(ret, SysCall1):
+                        arg = ret.arg
+                        if isinstance(ret, SleepMs):
+                            delay = arg
+                        elif isinstance(ret, IORead):
+                            cb.pend_throw(False)
+                            self.add_reader(arg, cb)
+                            continue
+                        elif isinstance(ret, IOWrite):
+                            cb.pend_throw(False)
+                            self.add_writer(arg, cb)
+                            continue
+                        elif isinstance(ret, IOReadDone):
+                            self.remove_reader(arg)
+                        elif isinstance(ret, IOWriteDone):
+                            self.remove_writer(arg)
+                        elif isinstance(ret, StopLoop):
+                            return arg
+                        else:
+                            assert False, "Unknown syscall yielded: %r (of type %r)" % (ret, type(ret))
+                    elif isinstance(ret, type_gen):
+                        self.call_soon(ret)
+                    elif isinstance(ret, int):
+                        # Delay
+                        delay = ret
+                    elif ret is None:
+                        # Just reschedule
+                        pass
+                    elif ret is False:
+                        # Don't reschedule
+                        continue
+                    else:
+                        assert False, "Unsupported coroutine yield value: %r (of type %r)" % (ret, type(ret))
+                except StopIteration as e:
+                    if __debug__ and DEBUG:
+                        log.debug("Coroutine finished: %s", cb)
+                    continue
+                except CancelledError as e:
+                    if __debug__ and DEBUG:
+                        log.debug("Coroutine cancelled: %s", cb)
+                    continue
+                # Currently all syscalls don't return anything, so we don't
+                # need to feed anything to the next invocation of coroutine.
+                # If that changes, need to pass that value below.
+                if delay:
+                    self.call_later_ms(delay, cb)
+                else:
+                    self.call_soon(cb)
+
+            # Wait until next waitq task or I/O availability
+            delay = 0
+            if not self.runq:
+                delay = -1
+                if self.waitq:
+                    tnow = self.time()
+                    t = self.waitq.peektime()
+                    delay = time.ticks_diff(t, tnow)
+                    if delay < 0:
+                        delay = 0
+            self.wait(delay)
+
+    def run_until_complete(self, coro):
+        ret = None
+        def _run_and_stop():
+            nonlocal ret
+            ret = yield from coro
+            yield StopLoop(0)
+        self.call_soon(_run_and_stop())
+        self.run_forever()
+        return ret
+
+    def stop(self):
+        self.call_soon((lambda: (yield StopLoop(0)))())
+
+    def close(self):
+        pass
+
+
+class SysCall:
+
+    def __init__(self, *args):
+        self.args = args
+
+    def handle(self):
+        raise NotImplementedError
+
+# Optimized syscall with 1 arg
+class SysCall1(SysCall):
+
+    def __init__(self, arg):
+        self.arg = arg
+
+class StopLoop(SysCall1):
+    pass
+
+class IORead(SysCall1):
+    pass
+
+class IOWrite(SysCall1):
+    pass
+
+class IOReadDone(SysCall1):
+    pass
+
+class IOWriteDone(SysCall1):
+    pass
+
+
+_event_loop = None
+_event_loop_class = EventLoop
+def get_event_loop(runq_len=16, waitq_len=16):
+    global _event_loop
+    if _event_loop is None:
+        _event_loop = _event_loop_class(runq_len, waitq_len)
+    return _event_loop
+
+def sleep(secs):
+    yield int(secs * 1000)
+
+# Implementation of sleep_ms awaitable with zero heap memory usage
+class SleepMs(SysCall1):
+
+    def __init__(self):
+        self.v = None
+        self.arg = None
+
+    def __call__(self, arg):
+        self.v = arg
+        #print("__call__")
+        return self
+
+    def __iter__(self):
+        #print("__iter__")
+        return self
+
+    def __next__(self):
+        if self.v is not None:
+            #print("__next__ syscall enter")
+            self.arg = self.v
+            self.v = None
+            return self
+        #print("__next__ syscall exit")
+        _stop_iter.__traceback__ = None
+        raise _stop_iter
+
+_stop_iter = StopIteration()
+sleep_ms = SleepMs()
+
+
+def cancel(coro):
+    prev = coro.pend_throw(CancelledError())
+    if prev is False:
+        _event_loop.call_soon(coro)
+
+
+class TimeoutObj:
+    def __init__(self, coro):
+        self.coro = coro
+
+
+def wait_for_ms(coro, timeout):
+
+    def waiter(coro, timeout_obj):
+        res = yield from coro
+        if __debug__ and DEBUG:
+            log.debug("waiter: cancelling %s", timeout_obj)
+        timeout_obj.coro = None
+        return res
+
+    def timeout_func(timeout_obj):
+        if timeout_obj.coro:
+            if __debug__ and DEBUG:
+                log.debug("timeout_func: cancelling %s", timeout_obj.coro)
+            prev = timeout_obj.coro.pend_throw(TimeoutError())
+            #print("prev pend", prev)
+            if prev is False:
+                _event_loop.call_soon(timeout_obj.coro)
+
+    timeout_obj = TimeoutObj(_event_loop.cur_task)
+    _event_loop.call_later_ms(timeout, timeout_func, timeout_obj)
+    return (yield from waiter(coro, timeout_obj))
+
+
+def wait_for(coro, timeout):
+    return wait_for_ms(coro, int(timeout * 1000))
+
+
+def coroutine(f):
+    return f
+
+#
+# The functions below are deprecated in uasyncio, and provided only
+# for compatibility with CPython asyncio
+#
+
+def ensure_future(coro, loop=_event_loop):
+    _event_loop.call_soon(coro)
+    # CPython asyncio incompatibility: we don't return Task object
+    return coro
+
+
+# CPython asyncio incompatibility: Task is a function, not a class (for efficiency)
+def Task(coro, loop=_event_loop):
+    # Same as async()
+    _event_loop.call_soon(coro)
diff --git a/tests/mock_socket.py b/tests/mock_socket.py
index d0597a6..611a35a 100644
--- a/tests/mock_socket.py
+++ b/tests/mock_socket.py
@@ -43,6 +43,26 @@ class FakeStream(io.BytesIO):
         self.response += data
 
 
+class FakeStreamAsync:
+    def __init__(self, stream=None):
+        if stream is None:
+            stream = FakeStream(b'')
+        self.stream = stream
+
+    @property
+    def response(self):
+        return self.stream.response
+
+    async def readline(self):
+        return self.stream.readline()
+
+    async def read(self, n):
+        return self.stream.read(n)
+
+    async def awrite(self, data):
+        self.stream.write(data)
+
+
 def get_request_fd(method, path, headers=None, body=None):
     if headers is None:
         headers = {}
@@ -61,6 +81,11 @@ def get_request_fd(method, path, headers=None, body=None):
     return FakeStream(request_bytes.encode())
 
 
+def get_async_request_fd(method, path, headers=None, body=None):
+    fd = get_request_fd(method, path, headers=headers, body=body)
+    return FakeStreamAsync(fd)
+
+
 def clear_requests():
     _requests.clear()
 
diff --git a/tests/test_request_async.py b/tests/test_request_async.py
new file mode 100644
index 0000000..5dae0b2
--- /dev/null
+++ b/tests/test_request_async.py
@@ -0,0 +1,86 @@
+try:
+    import uasyncio as asyncio
+except ImportError:
+    import asyncio
+
+import unittest
+from microdot_async import Request
+from tests.mock_socket import get_async_request_fd
+
+
+def _run(coro):
+    return asyncio.get_event_loop().run_until_complete(coro)
+
+
+class TestRequestAsync(unittest.TestCase):
+    def test_create_request(self):
+        fd = get_async_request_fd('GET', '/foo')
+        req = _run(Request.create(fd, 'addr'))
+        self.assertEqual(req.client_addr, 'addr')
+        self.assertEqual(req.method, 'GET')
+        self.assertEqual(req.path, '/foo')
+        self.assertEqual(req.http_version, '1.0')
+        self.assertIsNone(req.query_string)
+        self.assertEqual(req.args, {})
+        self.assertEqual(req.headers, {'Host': 'example.com:1234'})
+        self.assertEqual(req.cookies, {})
+        self.assertEqual(req.content_length, 0)
+        self.assertEqual(req.content_type, None)
+        self.assertEqual(req.body, b'')
+        self.assertEqual(req.json, None)
+        self.assertEqual(req.form, None)
+
+    def test_headers(self):
+        fd = get_async_request_fd('GET', '/foo', headers={
+            'Content-Type': 'application/json',
+            'Cookie': 'foo=bar;abc=def',
+            'Content-Length': '3'}, body='aaa')
+        req = _run(Request.create(fd, 'addr'))
+        self.assertEqual(req.headers, {
+            'Host': 'example.com:1234',
+            'Content-Type': 'application/json',
+            'Cookie': 'foo=bar;abc=def',
+            'Content-Length': '3'})
+        self.assertEqual(req.content_type, 'application/json')
+        self.assertEqual(req.cookies, {'foo': 'bar', 'abc': 'def'})
+        self.assertEqual(req.content_length, 3)
+        self.assertEqual(req.body, b'aaa')
+
+    def test_args(self):
+        fd = get_async_request_fd('GET', '/?foo=bar&abc=def&x=%2f%%')
+        req = _run(Request.create(fd, 'addr'))
+        self.assertEqual(req.query_string, 'foo=bar&abc=def&x=%2f%%')
+        self.assertEqual(req.args, {'foo': 'bar', 'abc': 'def', 'x': '/%%'})
+
+    def test_json(self):
+        fd = get_async_request_fd('GET', '/foo', headers={
+            'Content-Type': 'application/json'}, body='{"foo":"bar"}')
+        req = _run(Request.create(fd, 'addr'))
+        json = req.json
+        self.assertEqual(json, {'foo': 'bar'})
+        self.assertTrue(req.json is json)
+
+        fd = get_async_request_fd('GET', '/foo', headers={
+            'Content-Type': 'application/json'}, body='[1, "2"]')
+        req = _run(Request.create(fd, 'addr'))
+        self.assertEqual(req.json, [1, '2'])
+
+        fd = get_async_request_fd('GET', '/foo', headers={
+            'Content-Type': 'application/xml'}, body='[1, "2"]')
+        req = _run(Request.create(fd, 'addr'))
+        self.assertIsNone(req.json)
+
+    def test_form(self):
+        fd = get_async_request_fd('GET', '/foo', headers={
+            'Content-Type': 'application/x-www-form-urlencoded'},
+            body='foo=bar&abc=def&x=%2f%%')
+        req = _run(Request.create(fd, 'addr'))
+        form = req.form
+        self.assertEqual(form, {'foo': 'bar', 'abc': 'def', 'x': '/%%'})
+        self.assertTrue(req.form is form)
+
+        fd = get_async_request_fd('GET', '/foo', headers={
+            'Content-Type': 'application/json'},
+            body='foo=bar&abc=def&x=%2f%%')
+        req = _run(Request.create(fd, 'addr'))
+        self.assertIsNone(req.form)
diff --git a/tests/test_response_async.py b/tests/test_response_async.py
new file mode 100644
index 0000000..a0e963f
--- /dev/null
+++ b/tests/test_response_async.py
@@ -0,0 +1,86 @@
+try:
+    import uasyncio as asyncio
+except ImportError:
+    import asyncio
+
+import unittest
+from microdot_async import Response
+from tests.mock_socket import FakeStreamAsync
+
+
+def _run(coro):
+    return asyncio.get_event_loop().run_until_complete(coro)
+
+
+class TestResponseAsync(unittest.TestCase):
+    def test_create_from_string(self):
+        res = Response('foo')
+        self.assertEqual(res.status_code, 200)
+        self.assertEqual(res.headers, {})
+        self.assertEqual(res.body, b'foo')
+        fd = FakeStreamAsync()
+        _run(res.write(fd))
+        self.assertIn(b'HTTP/1.0 200 OK\r\n', fd.response)
+        self.assertIn(b'Content-Length: 3\r\n', fd.response)
+        self.assertIn(b'Content-Type: text/plain\r\n', fd.response)
+        self.assertTrue(fd.response.endswith(b'\r\n\r\nfoo'))
+
+    def test_create_from_string_with_content_length(self):
+        res = Response('foo', headers={'Content-Length': '2'})
+        self.assertEqual(res.status_code, 200)
+        self.assertEqual(res.headers, {'Content-Length': '2'})
+        self.assertEqual(res.body, b'foo')
+        fd = FakeStreamAsync()
+        _run(res.write(fd))
+        self.assertIn(b'HTTP/1.0 200 OK\r\n', fd.response)
+        self.assertIn(b'Content-Length: 2\r\n', fd.response)
+        self.assertIn(b'Content-Type: text/plain\r\n', fd.response)
+        self.assertTrue(fd.response.endswith(b'\r\n\r\nfoo'))
+
+    def test_create_from_bytes(self):
+        res = Response(b'foo')
+        self.assertEqual(res.status_code, 200)
+        self.assertEqual(res.headers, {})
+        self.assertEqual(res.body, b'foo')
+        fd = FakeStreamAsync()
+        _run(res.write(fd))
+        self.assertIn(b'HTTP/1.0 200 OK\r\n', fd.response)
+        self.assertIn(b'Content-Length: 3\r\n', fd.response)
+        self.assertIn(b'Content-Type: text/plain\r\n', fd.response)
+        self.assertTrue(fd.response.endswith(b'\r\n\r\nfoo'))
+
+    def test_create_empty(self):
+        res = Response(headers={'X-Foo': 'Bar'})
+        self.assertEqual(res.status_code, 200)
+        self.assertEqual(res.headers, {'X-Foo': 'Bar'})
+        self.assertEqual(res.body, b'')
+        fd = FakeStreamAsync()
+        _run(res.write(fd))
+        self.assertIn(b'HTTP/1.0 200 OK\r\n', fd.response)
+        self.assertIn(b'X-Foo: Bar\r\n', fd.response)
+        self.assertIn(b'Content-Length: 0\r\n', fd.response)
+        self.assertIn(b'Content-Type: text/plain\r\n', fd.response)
+        self.assertTrue(fd.response.endswith(b'\r\n\r\n'))
+
+    def test_create_json(self):
+        res = Response({'foo': 'bar'})
+        self.assertEqual(res.status_code, 200)
+        self.assertEqual(res.headers, {'Content-Type': 'application/json'})
+        self.assertEqual(res.body, b'{"foo": "bar"}')
+        fd = FakeStreamAsync()
+        _run(res.write(fd))
+        self.assertIn(b'HTTP/1.0 200 OK\r\n', fd.response)
+        self.assertIn(b'Content-Length: 14\r\n', fd.response)
+        self.assertIn(b'Content-Type: application/json\r\n', fd.response)
+        self.assertTrue(fd.response.endswith(b'\r\n\r\n{"foo": "bar"}'))
+
+        res = Response([1, '2'])
+        self.assertEqual(res.status_code, 200)
+        self.assertEqual(res.headers, {'Content-Type': 'application/json'})
+        self.assertEqual(res.body, b'[1, "2"]')
+        fd = FakeStreamAsync()
+        _run(res.write(fd))
+        self.assertIn(b'HTTP/1.0 200 OK\r\n', fd.response)
+        self.assertIn(b'Content-Length: 8\r\n', fd.response)
+        self.assertIn(b'Content-Type: application/json\r\n', fd.response)
+        self.assertTrue(fd.response.endswith(b'\r\n\r\n[1, "2"]'))