second commit

This commit is contained in:
2024-12-27 22:31:23 +09:00
parent 2353324570
commit 10a0f110ca
8819 changed files with 1307198 additions and 28 deletions

View File

@ -0,0 +1,348 @@
from __future__ import annotations
import socket
import ssl as ssl_module
import threading
import warnings
from collections.abc import Sequence
from typing import Any
from ..client import ClientProtocol
from ..datastructures import HeadersLike
from ..extensions.base import ClientExtensionFactory
from ..extensions.permessage_deflate import enable_client_permessage_deflate
from ..headers import validate_subprotocols
from ..http11 import USER_AGENT, Response
from ..protocol import CONNECTING, Event
from ..typing import LoggerLike, Origin, Subprotocol
from ..uri import parse_uri
from .connection import Connection
from .utils import Deadline
__all__ = ["connect", "unix_connect", "ClientConnection"]
class ClientConnection(Connection):
    """
    :mod:`threading` implementation of a WebSocket client connection.

    :class:`ClientConnection` provides :meth:`recv` and :meth:`send` methods for
    receiving and sending messages.

    It supports iteration to receive messages::

        for message in websocket:
            process(message)

    The iterator exits normally when the connection is closed with close code
    1000 (OK) or 1001 (going away) or without a close code. It raises a
    :exc:`~websockets.exceptions.ConnectionClosedError` when the connection is
    closed with any other code.

    The ``close_timeout`` and ``max_queue`` arguments have the same meaning as
    in :func:`connect`.

    Args:
        socket: Socket connected to a WebSocket server.
        protocol: Sans-I/O connection.

    """

    def __init__(
        self,
        socket: socket.socket,
        protocol: ClientProtocol,
        *,
        close_timeout: float | None = 10,
        max_queue: int | None | tuple[int | None, int | None] = 16,
    ) -> None:
        # Narrow the type of self.protocol for type checkers: the base class
        # declares the generic Sans-I/O Protocol.
        self.protocol: ClientProtocol
        # Set when the handshake response is received, or when the connection
        # is lost before a response arrives; handshake() blocks on this event.
        self.response_rcvd = threading.Event()
        super().__init__(
            socket,
            protocol,
            close_timeout=close_timeout,
            max_queue=max_queue,
        )

    def handshake(
        self,
        additional_headers: HeadersLike | None = None,
        user_agent_header: str | None = USER_AGENT,
        timeout: float | None = None,
    ) -> None:
        """
        Perform the opening handshake.

        Args:
            additional_headers: Arbitrary HTTP headers added to the request.
            user_agent_header: Value of the ``User-Agent`` request header;
                :obj:`None` skips setting the header.
            timeout: Time to wait for the handshake response, in seconds;
                :obj:`None` waits forever.

        Raises:
            TimeoutError: If no response is received within ``timeout``.

        """
        # Build and send the handshake request while holding the send lock.
        with self.send_context(expected_state=CONNECTING):
            self.request = self.protocol.connect()
            if additional_headers is not None:
                self.request.headers.update(additional_headers)
            if user_agent_header is not None:
                self.request.headers["User-Agent"] = user_agent_header
            self.protocol.send_request(self.request)
        # Block until process_event() or recv_events() signals completion.
        if not self.response_rcvd.wait(timeout):
            raise TimeoutError("timed out during handshake")
        # self.protocol.handshake_exc is set when the connection is lost before
        # receiving a response, when the response cannot be parsed, or when the
        # response fails the handshake.
        if self.protocol.handshake_exc is not None:
            raise self.protocol.handshake_exc

    def process_event(self, event: Event) -> None:
        """
        Process one incoming event.

        """
        # First event - handshake response.
        if self.response is None:
            assert isinstance(event, Response)
            self.response = event
            self.response_rcvd.set()
        # Later events - frames.
        else:
            super().process_event(event)

    def recv_events(self) -> None:
        """
        Read incoming data from the socket and process events.

        """
        try:
            super().recv_events()
        finally:
            # If the connection is closed during the handshake, unblock it.
            self.response_rcvd.set()
def connect(
    uri: str,
    *,
    # TCP/TLS
    sock: socket.socket | None = None,
    ssl: ssl_module.SSLContext | None = None,
    server_hostname: str | None = None,
    # WebSocket
    origin: Origin | None = None,
    extensions: Sequence[ClientExtensionFactory] | None = None,
    subprotocols: Sequence[Subprotocol] | None = None,
    additional_headers: HeadersLike | None = None,
    user_agent_header: str | None = USER_AGENT,
    compression: str | None = "deflate",
    # Timeouts
    open_timeout: float | None = 10,
    close_timeout: float | None = 10,
    # Limits
    max_size: int | None = 2**20,
    max_queue: int | None | tuple[int | None, int | None] = 16,
    # Logging
    logger: LoggerLike | None = None,
    # Escape hatch for advanced customization
    create_connection: type[ClientConnection] | None = None,
    **kwargs: Any,
) -> ClientConnection:
    """
    Connect to the WebSocket server at ``uri``.

    This function returns a :class:`ClientConnection` instance, which you can
    use to send and receive messages.

    :func:`connect` may be used as a context manager::

        from websockets.sync.client import connect

        with connect(...) as websocket:
            ...

    The connection is closed automatically when exiting the context.

    Args:
        uri: URI of the WebSocket server.
        sock: Preexisting TCP socket. ``sock`` overrides the host and port
            from ``uri``. You may call :func:`socket.create_connection` to
            create a suitable TCP socket.
        ssl: Configuration for enabling TLS on the connection.
        server_hostname: Host name for the TLS handshake. ``server_hostname``
            overrides the host name from ``uri``.
        origin: Value of the ``Origin`` header, for servers that require it.
        extensions: List of supported extensions, in order in which they
            should be negotiated and run.
        subprotocols: List of supported subprotocols, in order of decreasing
            preference.
        additional_headers (HeadersLike | None): Arbitrary HTTP headers to add
            to the handshake request.
        user_agent_header: Value of the ``User-Agent`` request header.
            It defaults to ``"Python/x.y.z websockets/X.Y"``.
            Setting it to :obj:`None` removes the header.
        compression: The "permessage-deflate" extension is enabled by default.
            Set ``compression`` to :obj:`None` to disable it. See the
            :doc:`compression guide <../../topics/compression>` for details.
        open_timeout: Timeout for opening the connection in seconds.
            :obj:`None` disables the timeout.
        close_timeout: Timeout for closing the connection in seconds.
            :obj:`None` disables the timeout.
        max_size: Maximum size of incoming messages in bytes.
            :obj:`None` disables the limit.
        max_queue: High-water mark of the buffer where frames are received.
            It defaults to 16 frames. The low-water mark defaults to ``max_queue
            // 4``. You may pass a ``(high, low)`` tuple to set the high-water
            and low-water marks. If you want to disable flow control entirely,
            you may set it to ``None``, although that's a bad idea.
        logger: Logger for this client.
            It defaults to ``logging.getLogger("websockets.client")``.
            See the :doc:`logging guide <../../topics/logging>` for details.
        create_connection: Factory for the :class:`ClientConnection` managing
            the connection. Set it to a wrapper or a subclass to customize
            connection handling.

    Any other keyword arguments are passed to :func:`~socket.create_connection`.

    Raises:
        InvalidURI: If ``uri`` isn't a valid WebSocket URI.
        OSError: If the TCP connection fails.
        InvalidHandshake: If the opening handshake fails.
        TimeoutError: If the opening handshake times out.

    """
    # Process parameters

    # Backwards compatibility: ssl used to be called ssl_context.
    if ssl is None and "ssl_context" in kwargs:
        ssl = kwargs.pop("ssl_context")
        warnings.warn(  # deprecated in 13.0 - 2024-08-20
            "ssl_context was renamed to ssl",
            DeprecationWarning,
        )

    wsuri = parse_uri(uri)
    if not wsuri.secure and ssl is not None:
        raise ValueError("ssl argument is incompatible with a ws:// URI")

    # Private APIs for unix_connect(); popped so they don't reach
    # socket.create_connection() through **kwargs.
    unix: bool = kwargs.pop("unix", False)
    path: str | None = kwargs.pop("path", None)

    if unix:
        if path is None and sock is None:
            raise ValueError("missing path argument")
        elif path is not None and sock is not None:
            raise ValueError("path and sock arguments are incompatible")

    if subprotocols is not None:
        validate_subprotocols(subprotocols)

    if compression == "deflate":
        extensions = enable_client_permessage_deflate(extensions)
    elif compression is not None:
        raise ValueError(f"unsupported compression: {compression}")

    # Calculate timeouts on the TCP, TLS, and WebSocket handshakes.
    # The TCP and TLS timeouts must be set on the socket, then removed
    # to avoid conflicting with the WebSocket timeout in handshake().
    deadline = Deadline(open_timeout)

    if create_connection is None:
        create_connection = ClientConnection

    try:
        # Connect socket

        if sock is None:
            if unix:
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                sock.settimeout(deadline.timeout())
                assert path is not None  # mypy cannot figure this out
                sock.connect(path)
            else:
                kwargs.setdefault("timeout", deadline.timeout())
                sock = socket.create_connection((wsuri.host, wsuri.port), **kwargs)
            # Remove the connect timeout; handshake() manages its own deadline.
            sock.settimeout(None)

        # Disable Nagle algorithm

        if not unix:
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)

        # Initialize TLS wrapper and perform TLS handshake

        if wsuri.secure:
            if ssl is None:
                ssl = ssl_module.create_default_context()
            if server_hostname is None:
                server_hostname = wsuri.host
            sock.settimeout(deadline.timeout())
            sock = ssl.wrap_socket(sock, server_hostname=server_hostname)
            sock.settimeout(None)

        # Initialize WebSocket protocol

        protocol = ClientProtocol(
            wsuri,
            origin=origin,
            extensions=extensions,
            subprotocols=subprotocols,
            max_size=max_size,
            logger=logger,
        )

        # Initialize WebSocket connection

        connection = create_connection(
            sock,
            protocol,
            close_timeout=close_timeout,
            max_queue=max_queue,
        )
    except Exception:
        # Any failure before the connection object owns the socket: close
        # the socket ourselves (if it was created) and re-raise.
        if sock is not None:
            sock.close()
        raise

    try:
        connection.handshake(
            additional_headers,
            user_agent_header,
            deadline.timeout(),
        )
    except Exception:
        # Tear down cleanly: close the socket and wait for the reader thread
        # so a failed connect() doesn't leave a daemon thread behind.
        connection.close_socket()
        connection.recv_events_thread.join()
        raise

    return connection
def unix_connect(
    path: str | None = None,
    uri: str | None = None,
    **kwargs: Any,
) -> ClientConnection:
    """
    Connect to a WebSocket server listening on a Unix socket.

    This function accepts the same keyword arguments as :func:`connect`.

    It's only available on Unix.

    It's mainly useful for debugging servers listening on Unix sockets.

    Args:
        path: File system path to the Unix socket.
        uri: URI of the WebSocket server. ``uri`` defaults to
            ``ws://localhost/`` or, when a ``ssl`` is provided, to
            ``wss://localhost/``.

    """
    if uri is None:
        # Pick the scheme matching the TLS configuration; ssl_context is the
        # deprecated spelling of ssl, handled by connect().
        tls_configured = (
            kwargs.get("ssl") is not None or kwargs.get("ssl_context") is not None
        )
        uri = "wss://localhost/" if tls_configured else "ws://localhost/"
    return connect(uri=uri, unix=True, path=path, **kwargs)

View File

@ -0,0 +1,931 @@
from __future__ import annotations
import contextlib
import logging
import random
import socket
import struct
import threading
import uuid
from collections.abc import Iterable, Iterator, Mapping
from types import TracebackType
from typing import Any
from ..exceptions import (
ConcurrencyError,
ConnectionClosed,
ConnectionClosedOK,
ProtocolError,
)
from ..frames import DATA_OPCODES, BytesLike, CloseCode, Frame, Opcode
from ..http11 import Request, Response
from ..protocol import CLOSED, OPEN, Event, Protocol, State
from ..typing import Data, LoggerLike, Subprotocol
from .messages import Assembler
from .utils import Deadline
__all__ = ["Connection"]
class Connection:
"""
:mod:`threading` implementation of a WebSocket connection.
:class:`Connection` provides APIs shared between WebSocket servers and
clients.
You shouldn't use it directly. Instead, use
:class:`~websockets.sync.client.ClientConnection` or
:class:`~websockets.sync.server.ServerConnection`.
"""
recv_bufsize = 65536
def __init__(
    self,
    socket: socket.socket,
    protocol: Protocol,
    *,
    close_timeout: float | None = 10,
    max_queue: int | None | tuple[int | None, int | None] = 16,
) -> None:
    self.socket = socket
    self.protocol = protocol
    self.close_timeout = close_timeout
    # Normalize max_queue to a (high, low) pair; a bare int or None sets
    # only the high-water mark and lets the Assembler pick the low one.
    if isinstance(max_queue, int) or max_queue is None:
        max_queue = (max_queue, None)
    self.max_queue = max_queue

    # Inject reference to this instance in the protocol's logger.
    self.protocol.logger = logging.LoggerAdapter(
        self.protocol.logger,
        {"websocket": self},
    )

    # Copy attributes from the protocol for convenience.
    self.id: uuid.UUID = self.protocol.id
    """Unique identifier of the connection. Useful in logs."""
    self.logger: LoggerLike = self.protocol.logger
    """Logger for this connection."""
    self.debug = self.protocol.debug

    # HTTP handshake request and response.
    self.request: Request | None = None
    """Opening handshake request."""
    self.response: Response | None = None
    """Opening handshake response."""

    # Mutex serializing interactions with the protocol.
    self.protocol_mutex = threading.Lock()
    # Lock stopping reads when the assembler buffer is full.
    self.recv_flow_control = threading.Lock()
    # Assembler turning frames into messages and serializing reads.
    self.recv_messages = Assembler(
        *self.max_queue,
        pause=self.recv_flow_control.acquire,
        resume=self.recv_flow_control.release,
    )

    # Whether we are busy sending a fragmented message.
    self.send_in_progress = False

    # Deadline for the closing handshake.
    self.close_deadline: Deadline | None = None

    # Mapping of ping IDs to pong waiters, in chronological order.
    self.ping_waiters: dict[bytes, threading.Event] = {}

    # Exception raised in recv_events, to be chained to ConnectionClosed
    # in the user thread in order to show why the TCP connection dropped.
    self.recv_exc: BaseException | None = None

    # Receiving events from the socket. This thread is marked as daemon to
    # allow creating a connection in a non-daemon thread and using it in a
    # daemon thread. This mustn't prevent the interpreter from exiting.
    # NOTE: started last, so all attributes above are set before the thread
    # can touch them.
    self.recv_events_thread = threading.Thread(
        target=self.recv_events,
        daemon=True,
    )
    self.recv_events_thread.start()
# Public attributes
@property
def local_address(self) -> Any:
    """
    Address of the local endpoint of the connection.

    This is the value returned by :meth:`~socket.socket.getsockname`; its
    format depends on the address family. For IPv4 connections, it is a
    ``(host, port)`` tuple.

    """
    address = self.socket.getsockname()
    return address
@property
def remote_address(self) -> Any:
    """
    Address of the remote endpoint of the connection.

    This is the value returned by :meth:`~socket.socket.getpeername`; its
    format depends on the address family. For IPv4 connections, it is a
    ``(host, port)`` tuple.

    """
    address = self.socket.getpeername()
    return address
@property
def state(self) -> State:
    """
    State of the WebSocket connection, defined in :rfc:`6455`.

    This attribute is provided for completeness. Typical applications
    shouldn't check its value. Instead, they should call :meth:`~recv` or
    :meth:`send` and handle :exc:`~websockets.exceptions.ConnectionClosed`
    exceptions.

    """
    # Delegates to the Sans-I/O protocol, which tracks the state machine.
    return self.protocol.state
@property
def subprotocol(self) -> Subprotocol | None:
    """
    Subprotocol negotiated during the opening handshake.

    :obj:`None` if no subprotocol was negotiated.

    """
    negotiated = self.protocol.subprotocol
    return negotiated
@property
def close_code(self) -> int | None:
    """
    WebSocket close code, defined in :rfc:`6455`.

    This attribute is provided for completeness. Typical applications
    shouldn't check its value. Instead, they should inspect attributes
    of :exc:`~websockets.exceptions.ConnectionClosed` exceptions.

    """
    return self.protocol.close_code
@property
def close_reason(self) -> str | None:
    """
    WebSocket close reason, defined in :rfc:`6455`.

    This attribute is provided for completeness. Typical applications
    shouldn't check its value. Instead, they should inspect attributes
    of :exc:`~websockets.exceptions.ConnectionClosed` exceptions.

    """
    return self.protocol.close_reason
# Public methods
def __enter__(self) -> Connection:
    # Entering a "with connection:" block returns the connection itself;
    # cleanup happens in __exit__.
    return self
def __exit__(
    self,
    exc_type: type[BaseException] | None,
    exc_value: BaseException | None,
    traceback: TracebackType | None,
) -> None:
    """
    Close the connection when leaving a ``with`` block.

    A clean exit sends a normal closure; leaving with an exception closes
    the connection with :attr:`CloseCode.INTERNAL_ERROR` instead.

    """
    code = (
        CloseCode.NORMAL_CLOSURE
        if exc_type is None
        else CloseCode.INTERNAL_ERROR
    )
    self.close(code)
def __iter__(self) -> Iterator[Data]:
    """
    Iterate on incoming messages.

    Each iteration calls :meth:`recv` and yields the message it returns,
    forever. Iteration stops cleanly when the connection closes normally;
    a protocol error or a network failure surfaces as a
    :exc:`~websockets.exceptions.ConnectionClosedError`.

    """
    while True:
        try:
            message = self.recv()
        except ConnectionClosedOK:
            return
        yield message
def recv(self, timeout: float | None = None, decode: bool | None = None) -> Data:
    """
    Receive the next message.

    When the connection is closed, :meth:`recv` raises
    :exc:`~websockets.exceptions.ConnectionClosed`. Specifically, it raises
    :exc:`~websockets.exceptions.ConnectionClosedOK` after a normal closure
    and :exc:`~websockets.exceptions.ConnectionClosedError` after a protocol
    error or a network failure. This is how you detect the end of the
    message stream.

    If ``timeout`` is :obj:`None`, block until a message is received. If
    ``timeout`` is set and no message is received within ``timeout``
    seconds, raise :exc:`TimeoutError`. Set ``timeout`` to ``0`` to check if
    a message was already received.

    If the message is fragmented, wait until all fragments are received,
    reassemble them, and return the whole message.

    Args:
        timeout: Timeout for receiving a message in seconds.
        decode: Set this flag to override the default behavior of returning
            :class:`str` or :class:`bytes`. See below for details.

    Returns:
        A string (:class:`str`) for a Text_ frame or a bytestring
        (:class:`bytes`) for a Binary_ frame.

        .. _Text: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6
        .. _Binary: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6

        You may override this behavior with the ``decode`` argument:

        * Set ``decode=False`` to disable UTF-8 decoding of Text_ frames and
          return a bytestring (:class:`bytes`). This improves performance
          when decoding isn't needed, for example if the message contains
          JSON and you're using a JSON library that expects a bytestring.
        * Set ``decode=True`` to force UTF-8 decoding of Binary_ frames
          and return a string (:class:`str`). This may be useful for
          servers that send binary frames instead of text frames.

    Raises:
        ConnectionClosed: When the connection is closed.
        ConcurrencyError: If two threads call :meth:`recv` or
            :meth:`recv_streaming` concurrently.

    """
    try:
        return self.recv_messages.get(timeout, decode)
    except EOFError:
        # The assembler ran out of frames: the connection is closing or
        # closed. Fall through to raise the close exception below.
        pass
        # fallthrough
    except ConcurrencyError:
        raise ConcurrencyError(
            "cannot call recv while another thread "
            "is already running recv or recv_streaming"
        ) from None
    except UnicodeDecodeError as exc:
        # Invalid UTF-8 in a text message fails the connection, then falls
        # through to raise the close exception below.
        with self.send_context():
            self.protocol.fail(
                CloseCode.INVALID_DATA,
                f"{exc.reason} at position {exc.start}",
            )
        # fallthrough
    # Wait for the protocol state to be CLOSED before accessing close_exc.
    self.recv_events_thread.join()
    raise self.protocol.close_exc from self.recv_exc
def recv_streaming(self, decode: bool | None = None) -> Iterator[Data]:
    """
    Receive the next message frame by frame.

    This method is designed for receiving fragmented messages. It returns an
    iterator that yields each fragment as it is received. This iterator must
    be fully consumed. Else, future calls to :meth:`recv` or
    :meth:`recv_streaming` will raise
    :exc:`~websockets.exceptions.ConcurrencyError`, making the connection
    unusable.

    :meth:`recv_streaming` raises the same exceptions as :meth:`recv`.

    Args:
        decode: Set this flag to override the default behavior of returning
            :class:`str` or :class:`bytes`. See below for details.

    Returns:
        An iterator of strings (:class:`str`) for a Text_ frame or
        bytestrings (:class:`bytes`) for a Binary_ frame.

        .. _Text: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6
        .. _Binary: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6

        You may override this behavior with the ``decode`` argument:

        * Set ``decode=False`` to disable UTF-8 decoding of Text_ frames
          and return bytestrings (:class:`bytes`). This may be useful to
          optimize performance when decoding isn't needed.
        * Set ``decode=True`` to force UTF-8 decoding of Binary_ frames
          and return strings (:class:`str`). This is useful for servers
          that send binary frames instead of text frames.

    Raises:
        ConnectionClosed: When the connection is closed.
        ConcurrencyError: If two threads call :meth:`recv` or
            :meth:`recv_streaming` concurrently.

    """
    try:
        yield from self.recv_messages.get_iter(decode)
        return
    except EOFError:
        # The assembler ran out of frames: the connection is closing or
        # closed. Fall through to raise the close exception below.
        pass
        # fallthrough
    except ConcurrencyError:
        raise ConcurrencyError(
            "cannot call recv_streaming while another thread "
            "is already running recv or recv_streaming"
        ) from None
    except UnicodeDecodeError as exc:
        # Invalid UTF-8 in a text message fails the connection, then falls
        # through to raise the close exception below.
        with self.send_context():
            self.protocol.fail(
                CloseCode.INVALID_DATA,
                f"{exc.reason} at position {exc.start}",
            )
        # fallthrough
    # Wait for the protocol state to be CLOSED before accessing close_exc.
    self.recv_events_thread.join()
    raise self.protocol.close_exc from self.recv_exc
def send(
    self,
    message: Data | Iterable[Data],
    text: bool | None = None,
) -> None:
    """
    Send a message.

    A string (:class:`str`) is sent as a Text_ frame. A bytestring or
    bytes-like object (:class:`bytes`, :class:`bytearray`, or
    :class:`memoryview`) is sent as a Binary_ frame.

    .. _Text: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6
    .. _Binary: https://datatracker.ietf.org/doc/html/rfc6455#section-5.6

    You may override this behavior with the ``text`` argument:

    * Set ``text=True`` to send a bytestring or bytes-like object
      (:class:`bytes`, :class:`bytearray`, or :class:`memoryview`) as a
      Text_ frame. This improves performance when the message is already
      UTF-8 encoded, for example if the message contains JSON and you're
      using a JSON library that produces a bytestring.
    * Set ``text=False`` to send a string (:class:`str`) in a Binary_
      frame. This may be useful for servers that expect binary frames
      instead of text frames.

    :meth:`send` also accepts an iterable of strings, bytestrings, or
    bytes-like objects to enable fragmentation_. Each item is treated as a
    message fragment and sent in its own frame. All items must be of the
    same type, or else :meth:`send` will raise a :exc:`TypeError` and the
    connection will be closed.

    .. _fragmentation: https://datatracker.ietf.org/doc/html/rfc6455#section-5.4

    :meth:`send` rejects dict-like objects because this is often an error.
    (If you really want to send the keys of a dict-like object as fragments,
    call its :meth:`~dict.keys` method and pass the result to :meth:`send`.)

    When the connection is closed, :meth:`send` raises
    :exc:`~websockets.exceptions.ConnectionClosed`. Specifically, it
    raises :exc:`~websockets.exceptions.ConnectionClosedOK` after a normal
    connection closure and
    :exc:`~websockets.exceptions.ConnectionClosedError` after a protocol
    error or a network failure.

    Args:
        message: Message to send.

    Raises:
        ConnectionClosed: When the connection is closed.
        ConcurrencyError: If the connection is sending a fragmented message.
        TypeError: If ``message`` doesn't have a supported type.

    """
    # Unfragmented message -- this case must be handled first because
    # strings and bytes-like objects are iterable.

    if isinstance(message, str):
        with self.send_context():
            if self.send_in_progress:
                raise ConcurrencyError(
                    "cannot call send while another thread "
                    "is already running send"
                )
            if text is False:
                self.protocol.send_binary(message.encode())
            else:
                self.protocol.send_text(message.encode())

    elif isinstance(message, BytesLike):
        with self.send_context():
            if self.send_in_progress:
                raise ConcurrencyError(
                    "cannot call send while another thread "
                    "is already running send"
                )
            if text is True:
                self.protocol.send_text(message)
            else:
                self.protocol.send_binary(message)

    # Catch a common mistake -- passing a dict to send().

    elif isinstance(message, Mapping):
        raise TypeError("data is a dict-like object")

    # Fragmented message -- regular iterator.

    elif isinstance(message, Iterable):
        chunks = iter(message)
        try:
            chunk = next(chunks)
        except StopIteration:
            # An empty iterable sends nothing at all.
            return

        try:
            # First fragment. Its type (str vs. bytes-like) fixes the type
            # that all following fragments must have, recorded in `encode`.
            if isinstance(chunk, str):
                with self.send_context():
                    if self.send_in_progress:
                        raise ConcurrencyError(
                            "cannot call send while another thread "
                            "is already running send"
                        )
                    self.send_in_progress = True
                    if text is False:
                        self.protocol.send_binary(chunk.encode(), fin=False)
                    else:
                        self.protocol.send_text(chunk.encode(), fin=False)
                encode = True
            elif isinstance(chunk, BytesLike):
                with self.send_context():
                    if self.send_in_progress:
                        raise ConcurrencyError(
                            "cannot call send while another thread "
                            "is already running send"
                        )
                    self.send_in_progress = True
                    if text is True:
                        self.protocol.send_text(chunk, fin=False)
                    else:
                        self.protocol.send_binary(chunk, fin=False)
                encode = False
            else:
                raise TypeError("data iterable must contain bytes or str")

            # Other fragments
            for chunk in chunks:
                if isinstance(chunk, str) and encode:
                    with self.send_context():
                        assert self.send_in_progress
                        self.protocol.send_continuation(chunk.encode(), fin=False)
                elif isinstance(chunk, BytesLike) and not encode:
                    with self.send_context():
                        assert self.send_in_progress
                        self.protocol.send_continuation(chunk, fin=False)
                else:
                    raise TypeError("data iterable must contain uniform types")

            # Final fragment.
            with self.send_context():
                self.protocol.send_continuation(b"", fin=True)
                self.send_in_progress = False

        except ConcurrencyError:
            # We didn't start sending a fragmented message.
            # The connection is still usable.
            raise

        except Exception:
            # We're half-way through a fragmented message and we can't
            # complete it. This makes the connection unusable.
            with self.send_context():
                self.protocol.fail(
                    CloseCode.INTERNAL_ERROR,
                    "error in fragmented message",
                )
            raise

    else:
        raise TypeError("data must be str, bytes, or iterable")
def close(self, code: int = CloseCode.NORMAL_CLOSURE, reason: str = "") -> None:
    """
    Perform the closing handshake.

    :meth:`close` waits for the other end to complete the handshake, for the
    TCP connection to terminate, and for all incoming messages to be read
    with :meth:`recv`.

    :meth:`close` is idempotent: it doesn't do anything once the
    connection is closed.

    Args:
        code: WebSocket close code.
        reason: WebSocket close reason.

    """
    try:
        # The context manager takes care of waiting for the TCP connection
        # to terminate after calling a method that sends a close frame.
        with self.send_context():
            if self.send_in_progress:
                # Closing in the middle of a fragmented message would leave
                # the stream inconsistent; fail the connection instead.
                self.protocol.fail(
                    CloseCode.INTERNAL_ERROR,
                    "close during fragmented message",
                )
            else:
                self.protocol.send_close(code, reason)
    except ConnectionClosed:
        # Ignore ConnectionClosed exceptions raised from send_context().
        # They mean that the connection is closed, which was the goal.
        pass
def ping(self, data: Data | None = None) -> threading.Event:
    """
    Send a Ping_.

    .. _Ping: https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.2

    A ping may serve as a keepalive or as a check that the remote endpoint
    received all messages up to this point

    Args:
        data: Payload of the ping. A :class:`str` will be encoded to UTF-8.
            If ``data`` is :obj:`None`, the payload is four random bytes.

    Returns:
        An event that will be set when the corresponding pong is received.
        You can ignore it if you don't intend to wait.

        ::

            pong_event = ws.ping()
            pong_event.wait()  # only if you want to wait for the pong

    Raises:
        ConnectionClosed: When the connection is closed.
        ConcurrencyError: If another ping was sent with the same data and
            the corresponding pong wasn't received yet.

    """
    # Normalize the payload to bytes; None means "generate one below".
    if isinstance(data, BytesLike):
        data = bytes(data)
    elif isinstance(data, str):
        data = data.encode()
    elif data is not None:
        raise TypeError("data must be str or bytes-like")

    with self.send_context():
        # Protect against duplicates if a payload is explicitly set.
        if data in self.ping_waiters:
            raise ConcurrencyError("already waiting for a pong with the same data")

        # Generate a unique random payload otherwise. The loop retries on
        # the unlikely collision with an in-flight ping's payload.
        while data is None or data in self.ping_waiters:
            data = struct.pack("!I", random.getrandbits(32))

        pong_waiter = threading.Event()
        self.ping_waiters[data] = pong_waiter
        self.protocol.send_ping(data)
        return pong_waiter
def pong(self, data: Data = b"") -> None:
    """
    Send a Pong_.

    .. _Pong: https://datatracker.ietf.org/doc/html/rfc6455#section-5.5.3

    An unsolicited pong may serve as a unidirectional heartbeat.

    Args:
        data: Payload of the pong. A :class:`str` will be encoded to UTF-8.

    Raises:
        ConnectionClosed: When the connection is closed.

    """
    # Normalize the payload to bytes before handing it to the protocol.
    if isinstance(data, str):
        payload = data.encode()
    elif isinstance(data, BytesLike):
        payload = bytes(data)
    else:
        raise TypeError("data must be str or bytes-like")
    with self.send_context():
        self.protocol.send_pong(payload)
# Private methods
def process_event(self, event: Event) -> None:
    """
    Process one incoming event.

    This method is overridden in subclasses to handle the handshake.

    """
    assert isinstance(event, Frame)
    # Data frames (text, binary, continuation) feed the message assembler.
    if event.opcode in DATA_OPCODES:
        self.recv_messages.put(event)
    # A pong unblocks the waiters of the matching (and earlier) pings.
    if event.opcode is Opcode.PONG:
        self.acknowledge_pings(bytes(event.data))
def acknowledge_pings(self, data: bytes) -> None:
    """
    Acknowledge pings when receiving a pong.

    """
    with self.protocol_mutex:
        # Ignore unsolicited pong.
        if data not in self.ping_waiters:
            return
        # Sending a pong for only the most recent ping is legal.
        # Acknowledge all previous pings too in that case.
        ping_id = None
        ping_ids = []
        # self.ping_waiters preserves insertion order, so iterating walks
        # pings chronologically up to the one this pong answers.
        for ping_id, ping in self.ping_waiters.items():
            ping_ids.append(ping_id)
            ping.set()
            if ping_id == data:
                break
        else:
            # Unreachable: membership was checked above, so the loop must
            # hit `break`. The for/else guards against a logic regression.
            raise AssertionError("solicited pong not found in pings")
        # Remove acknowledged pings from self.ping_waiters.
        for ping_id in ping_ids:
            del self.ping_waiters[ping_id]
def recv_events(self) -> None:
    """
    Read incoming data from the socket and process events.

    Run this method in a thread as long as the connection is alive.

    ``recv_events()`` exits immediately when the ``self.socket`` is closed.

    """
    try:
        while True:
            try:
                # recv_flow_control is held by the assembler while its
                # buffer is full, pausing reads until recv() drains it.
                with self.recv_flow_control:
                    if self.close_deadline is not None:
                        self.socket.settimeout(self.close_deadline.timeout())
                    data = self.socket.recv(self.recv_bufsize)
            except Exception as exc:
                if self.debug:
                    self.logger.debug(
                        "! error while receiving data",
                        exc_info=True,
                    )
                # When the closing handshake is initiated by our side,
                # recv() may block until send_context() closes the socket.
                # In that case, send_context() already set recv_exc.
                # Calling set_recv_exc() avoids overwriting it.
                with self.protocol_mutex:
                    self.set_recv_exc(exc)
                break

            # Empty read means the peer closed its end of the connection.
            if data == b"":
                break

            # Acquire the connection lock.
            with self.protocol_mutex:
                # Feed incoming data to the protocol.
                self.protocol.receive_data(data)

                # This isn't expected to raise an exception.
                events = self.protocol.events_received()

                # Write outgoing data to the socket.
                try:
                    self.send_data()
                except Exception as exc:
                    if self.debug:
                        self.logger.debug(
                            "! error while sending data",
                            exc_info=True,
                        )
                    # Similarly to the above, avoid overriding an exception
                    # set by send_context(), in case of a race condition
                    # i.e. send_context() closes the socket after recv()
                    # returns above but before send_data() calls send().
                    self.set_recv_exc(exc)
                    break

                if self.protocol.close_expected():
                    # If the connection is expected to close soon, set the
                    # close deadline based on the close timeout.
                    if self.close_deadline is None:
                        self.close_deadline = Deadline(self.close_timeout)

            # Unlock conn_mutex before processing events. Else, the
            # application can't send messages in response to events.

            # If self.send_data raised an exception, then events are lost.
            # Given that automatic responses write small amounts of data,
            # this should be uncommon, so we don't handle the edge case.

            for event in events:
                # This isn't expected to raise an exception.
                self.process_event(event)

        # Breaking out of the while True: ... loop means that we believe
        # that the socket doesn't work anymore.
        with self.protocol_mutex:
            # Feed the end of the data stream to the protocol.
            self.protocol.receive_eof()

            # This isn't expected to raise an exception.
            events = self.protocol.events_received()

            # There is no error handling because send_data() can only write
            # the end of the data stream here and it handles errors itself.
            self.send_data()

        # This code path is triggered when receiving an HTTP response
        # without a Content-Length header. This is the only case where
        # reading until EOF generates an event; all other events have
        # a known length. Ignore for coverage measurement because tests
        # are in test_client.py rather than test_connection.py.
        for event in events:  # pragma: no cover
            # This isn't expected to raise an exception.
            self.process_event(event)

    except Exception as exc:
        # This branch should never run. It's a safety net in case of bugs.
        self.logger.error("unexpected internal error", exc_info=True)
        with self.protocol_mutex:
            self.set_recv_exc(exc)
    finally:
        # This isn't expected to raise an exception.
        self.close_socket()
@contextlib.contextmanager
def send_context(
    self,
    *,
    expected_state: State = OPEN,  # CONNECTING during the opening handshake
) -> Iterator[None]:
    """
    Create a context for writing to the connection from user code.

    On entry, :meth:`send_context` acquires the connection lock and checks
    that the connection is open; on exit, it writes outgoing data to the
    socket::

        with self.send_context():
            self.protocol.send_text(message.encode())

    When the connection isn't open on entry, when the connection is expected
    to close on exit, or when an unexpected error happens, terminating the
    connection, :meth:`send_context` waits until the connection is closed
    then raises :exc:`~websockets.exceptions.ConnectionClosed`.

    Args:
        expected_state: Protocol state required on entry; ``OPEN`` for
            regular messages, ``CONNECTING`` during the opening handshake.

    Raises:
        ConnectionClosed: If the connection isn't in the expected state or
            if writing outgoing data fails.

    """
    # Should we wait until the connection is closed?
    wait_for_close = False
    # Should we close the socket and raise ConnectionClosed?
    raise_close_exc = False
    # What exception should we chain ConnectionClosed to?
    original_exc: BaseException | None = None

    # Acquire the protocol lock.
    with self.protocol_mutex:
        if self.protocol.state is expected_state:
            # Let the caller interact with the protocol.
            try:
                yield
            except (ProtocolError, ConcurrencyError):
                # The protocol state wasn't changed. Exit immediately.
                raise
            except Exception as exc:
                self.logger.error("unexpected internal error", exc_info=True)
                # This branch should never run. It's a safety net in case of
                # bugs. Since we don't know what happened, we will close the
                # connection and raise the exception to the caller.
                wait_for_close = False
                raise_close_exc = True
                original_exc = exc
            else:
                # Check if the connection is expected to close soon.
                if self.protocol.close_expected():
                    wait_for_close = True
                    # If the connection is expected to close soon, set the
                    # close deadline based on the close timeout.
                    # Since we tested earlier that protocol.state was OPEN
                    # (or CONNECTING) and we didn't release protocol_mutex,
                    # it is certain that self.close_deadline is still None.
                    assert self.close_deadline is None
                    self.close_deadline = Deadline(self.close_timeout)
                # Write outgoing data to the socket.
                try:
                    self.send_data()
                except Exception as exc:
                    if self.debug:
                        self.logger.debug(
                            "! error while sending data",
                            exc_info=True,
                        )
                    # While the only expected exception here is OSError,
                    # other exceptions would be treated identically.
                    wait_for_close = False
                    raise_close_exc = True
                    original_exc = exc
        else:  # self.protocol.state is not expected_state
            # Minor layering violation: we assume that the connection
            # will be closing soon if it isn't in the expected state.
            wait_for_close = True
            raise_close_exc = True

    # To avoid a deadlock, release the connection lock by exiting the
    # context manager before waiting for recv_events() to terminate.

    # If the connection is expected to close soon and the close timeout
    # elapses, close the socket to terminate the connection.
    if wait_for_close:
        if self.close_deadline is None:
            timeout = self.close_timeout
        else:
            # Thread.join() returns immediately if timeout is negative.
            timeout = self.close_deadline.timeout(raise_if_elapsed=False)
        self.recv_events_thread.join(timeout)
        if self.recv_events_thread.is_alive():
            # There's no risk to overwrite another error because
            # original_exc is never set when wait_for_close is True.
            assert original_exc is None
            original_exc = TimeoutError("timed out while closing connection")
            # Set recv_exc before closing the socket in order to get
            # proper exception reporting.
            raise_close_exc = True
            with self.protocol_mutex:
                self.set_recv_exc(original_exc)

    # If an error occurred, close the socket to terminate the connection and
    # raise an exception.
    if raise_close_exc:
        self.close_socket()
        # Wait for the protocol state to be CLOSED before accessing close_exc.
        self.recv_events_thread.join()
        raise self.protocol.close_exc from original_exc
def send_data(self) -> None:
    """
    Write pending outgoing data to the socket.

    The caller must hold ``protocol_mutex``.

    Raises:
        OSError: When a socket operation fails.

    """
    assert self.protocol_mutex.locked()
    for chunk in self.protocol.data_to_send():
        if not chunk:
            # An empty bytestring marks the end of the data stream:
            # half-close the socket for writing.
            try:
                self.socket.shutdown(socket.SHUT_WR)
            except OSError:  # socket already closed
                pass
            continue
        if self.close_deadline is not None:
            # Bound the write with the remaining close timeout.
            self.socket.settimeout(self.close_deadline.timeout())
        self.socket.sendall(chunk)
def set_recv_exc(self, exc: BaseException | None) -> None:
"""
Set recv_exc, if not set yet.
This method requires holding protocol_mutex.
"""
assert self.protocol_mutex.locked()
if self.recv_exc is None: # pragma: no branch
self.recv_exc = exc
def close_socket(self) -> None:
    """
    Shut down and close the socket, then close the message assembler.

    After close_socket() returns, recv_events() is guaranteed to terminate:
    it can only block on socket.recv() or on recv_messages.put(), and both
    are unblocked here.

    """
    # On Linux, closing alone doesn't interrupt a blocked recv();
    # shutdown() is required.
    with contextlib.suppress(OSError):  # socket may already be closed
        self.socket.shutdown(socket.SHUT_RDWR)
    self.socket.close()

    # receive_eof() is idempotent, so calling it again here is safe.
    # It forces the protocol into the CLOSED state.
    self.protocol.receive_eof()
    assert self.protocol.state is CLOSED

    # Wake up pending recv() calls with a ConnectionClosed exception.
    self.recv_messages.close()

View File

@ -0,0 +1,306 @@
from __future__ import annotations
import codecs
import queue
import threading
from typing import Any, Callable, Iterable, Iterator
from ..exceptions import ConcurrencyError
from ..frames import OP_BINARY, OP_CONT, OP_TEXT, Frame
from ..typing import Data
from .utils import Deadline
__all__ = ["Assembler"]
UTF8Decoder = codecs.getincrementaldecoder("utf-8")
class Assembler:
    """
    Assemble messages from frames.

    :class:`Assembler` expects only data frames. The stream of frames must
    respect the protocol; if it doesn't, the behavior is undefined.

    Args:
        pause: Called when the buffer of frames goes above the high water mark;
            should pause reading from the network.
        resume: Called when the buffer of frames goes below the low water mark;
            should resume reading from the network.

    """

    def __init__(
        self,
        high: int | None = None,
        low: int | None = None,
        pause: Callable[[], Any] = lambda: None,
        resume: Callable[[], Any] = lambda: None,
    ) -> None:
        # Serialize reads and writes -- except for reads via synchronization
        # primitives provided by the threading and queue modules.
        self.mutex = threading.Lock()

        # Queue of incoming frames. None is a sentinel pushed by close() to
        # unblock a pending get().
        self.frames: queue.SimpleQueue[Frame | None] = queue.SimpleQueue()

        # We cannot put a hard limit on the size of the queue because a single
        # call to Protocol.data_received() could produce thousands of frames,
        # which must be buffered. Instead, we pause reading when the buffer goes
        # above the high limit and we resume when it goes under the low limit.
        # When only one of the marks is given, derive the other with a 4x ratio.
        if high is not None and low is None:
            low = high // 4
        if high is None and low is not None:
            high = low * 4
        if high is not None and low is not None:
            if low < 0:
                raise ValueError("low must be positive or equal to zero")
            if high < low:
                raise ValueError("high must be greater than or equal to low")
        self.high, self.low = high, low
        self.pause = pause
        self.resume = resume
        self.paused = False

        # This flag prevents concurrent calls to get() by user code.
        self.get_in_progress = False

        # This flag marks the end of the connection.
        self.closed = False

    def get_next_frame(self, timeout: float | None = None) -> Frame:
        # Helper to factor out the logic for getting the next frame from the
        # queue, while handling timeouts and reaching the end of the stream.
        if self.closed:
            # After close(), drain frames that are already buffered but never
            # block: the stream cannot produce more frames.
            try:
                frame = self.frames.get(block=False)
            except queue.Empty:
                raise EOFError("stream of frames ended") from None
        else:
            try:
                frame = self.frames.get(block=True, timeout=timeout)
            except queue.Empty:
                raise TimeoutError(f"timed out in {timeout:.1f}s") from None
        # close() pushes None to unblock a pending get(); translate it to EOF.
        if frame is None:
            raise EOFError("stream of frames ended")
        return frame

    def reset_queue(self, frames: Iterable[Frame]) -> None:
        # Helper to put frames back into the queue after they were fetched.
        # This happens only when the queue is empty. However, by the time
        # we acquire self.mutex, put() may have added items in the queue.
        # Therefore, we must handle the case where the queue is not empty.
        frame: Frame | None
        with self.mutex:
            queued = []
            try:
                while True:
                    queued.append(self.frames.get(block=False))
            except queue.Empty:
                pass
            # Re-queue the fetched frames first so ordering is preserved.
            for frame in frames:
                self.frames.put(frame)
            # This loop runs only when a race condition occurs.
            for frame in queued:  # pragma: no cover
                self.frames.put(frame)

    def get(self, timeout: float | None = None, decode: bool | None = None) -> Data:
        """
        Read the next message.

        :meth:`get` returns a single :class:`str` or :class:`bytes`.

        If the message is fragmented, :meth:`get` waits until the last frame is
        received, then it reassembles the message and returns it. To receive
        messages frame by frame, use :meth:`get_iter` instead.

        Args:
            timeout: If a timeout is provided and elapses before a complete
                message is received, :meth:`get` raises :exc:`TimeoutError`.
            decode: :obj:`False` disables UTF-8 decoding of text frames and
                returns :class:`bytes`. :obj:`True` forces UTF-8 decoding of
                binary frames and returns :class:`str`.

        Raises:
            EOFError: If the stream of frames has ended.
            UnicodeDecodeError: If a text frame contains invalid UTF-8.
            ConcurrencyError: If two coroutines run :meth:`get` or
                :meth:`get_iter` concurrently.
            TimeoutError: If a timeout is provided and elapses before a
                complete message is received.

        """
        with self.mutex:
            if self.get_in_progress:
                raise ConcurrencyError("get() or get_iter() is already running")
            self.get_in_progress = True

        # Locking with get_in_progress prevents concurrent execution
        # until get() fetches a complete message or times out.

        try:
            deadline = Deadline(timeout)

            # First frame
            frame = self.get_next_frame(deadline.timeout())
            with self.mutex:
                self.maybe_resume()
            assert frame.opcode is OP_TEXT or frame.opcode is OP_BINARY
            if decode is None:
                decode = frame.opcode is OP_TEXT
            frames = [frame]

            # Following frames, for fragmented messages
            while not frame.fin:
                try:
                    frame = self.get_next_frame(deadline.timeout())
                except TimeoutError:
                    # Put frames already received back into the queue
                    # so that future calls to get() can return them.
                    self.reset_queue(frames)
                    raise
                with self.mutex:
                    self.maybe_resume()
                assert frame.opcode is OP_CONT
                frames.append(frame)

        finally:
            self.get_in_progress = False

        data = b"".join(frame.data for frame in frames)
        if decode:
            return data.decode()
        else:
            return data

    def get_iter(self, decode: bool | None = None) -> Iterator[Data]:
        """
        Stream the next message.

        Iterating the return value of :meth:`get_iter` yields a :class:`str` or
        :class:`bytes` for each frame in the message.

        The iterator must be fully consumed before calling :meth:`get_iter` or
        :meth:`get` again. Else, :exc:`ConcurrencyError` is raised.

        This method only makes sense for fragmented messages. If messages aren't
        fragmented, use :meth:`get` instead.

        Args:
            decode: :obj:`False` disables UTF-8 decoding of text frames and
                returns :class:`bytes`. :obj:`True` forces UTF-8 decoding of
                binary frames and returns :class:`str`.

        Raises:
            EOFError: If the stream of frames has ended.
            UnicodeDecodeError: If a text frame contains invalid UTF-8.
            ConcurrencyError: If two coroutines run :meth:`get` or
                :meth:`get_iter` concurrently.

        """
        with self.mutex:
            if self.get_in_progress:
                raise ConcurrencyError("get() or get_iter() is already running")
            self.get_in_progress = True

        # Locking with get_in_progress prevents concurrent execution
        # until get_iter() fetches a complete message or times out.

        # If get_iter() raises an exception e.g. in decoder.decode(),
        # get_in_progress remains set and the connection becomes unusable.

        # First frame
        frame = self.get_next_frame()
        with self.mutex:
            self.maybe_resume()
        assert frame.opcode is OP_TEXT or frame.opcode is OP_BINARY
        if decode is None:
            decode = frame.opcode is OP_TEXT
        if decode:
            # Incremental decoder validates UTF-8 across frame boundaries.
            decoder = UTF8Decoder()
            yield decoder.decode(frame.data, frame.fin)
        else:
            yield frame.data

        # Following frames, for fragmented messages
        while not frame.fin:
            frame = self.get_next_frame()
            with self.mutex:
                self.maybe_resume()
            assert frame.opcode is OP_CONT
            if decode:
                yield decoder.decode(frame.data, frame.fin)
            else:
                yield frame.data

        self.get_in_progress = False

    def put(self, frame: Frame) -> None:
        """
        Add ``frame`` to the next message.

        Raises:
            EOFError: If the stream of frames has ended.

        """
        with self.mutex:
            if self.closed:
                raise EOFError("stream of frames ended")
            self.frames.put(frame)
            self.maybe_pause()

    # put() and get/get_iter() call maybe_pause() and maybe_resume() while
    # holding self.mutex. This guarantees that the calls interleave properly.
    # Specifically, it prevents a race condition where maybe_resume() would
    # run before maybe_pause(), leaving the connection incorrectly paused.

    # A race condition is possible when get/get_iter() call self.frames.get()
    # without holding self.mutex. However, it's harmless — and even beneficial!
    # It can only result in popping an item from the queue before maybe_resume()
    # runs and skipping a pause() - resume() cycle that would otherwise occur.

    def maybe_pause(self) -> None:
        """Pause the writer if queue is above the high water mark."""
        # Skip if flow control is disabled
        if self.high is None:
            return

        assert self.mutex.locked()

        # Check for "> high" to support high = 0
        if self.frames.qsize() > self.high and not self.paused:
            self.paused = True
            self.pause()

    def maybe_resume(self) -> None:
        """Resume the writer if queue is below the low water mark."""
        # Skip if flow control is disabled
        if self.low is None:
            return

        assert self.mutex.locked()

        # Check for "<= low" to support low = 0
        if self.frames.qsize() <= self.low and self.paused:
            self.paused = False
            self.resume()

    def close(self) -> None:
        """
        End the stream of frames.

        Calling :meth:`close` concurrently with :meth:`get`, :meth:`get_iter`,
        or :meth:`put` is safe. They will raise :exc:`EOFError`.

        """
        with self.mutex:
            if self.closed:
                return

            self.closed = True

            if self.get_in_progress:
                # Unblock get() or get_iter().
                self.frames.put(None)

View File

@ -0,0 +1,744 @@
from __future__ import annotations
import hmac
import http
import logging
import os
import selectors
import socket
import ssl as ssl_module
import sys
import threading
import warnings
from collections.abc import Iterable, Sequence
from types import TracebackType
from typing import Any, Callable, cast
from ..exceptions import InvalidHeader
from ..extensions.base import ServerExtensionFactory
from ..extensions.permessage_deflate import enable_server_permessage_deflate
from ..frames import CloseCode
from ..headers import (
build_www_authenticate_basic,
parse_authorization_basic,
validate_subprotocols,
)
from ..http11 import SERVER, Request, Response
from ..protocol import CONNECTING, OPEN, Event
from ..server import ServerProtocol
from ..typing import LoggerLike, Origin, StatusLike, Subprotocol
from .connection import Connection
from .utils import Deadline
__all__ = ["serve", "unix_serve", "ServerConnection", "Server", "basic_auth"]
class ServerConnection(Connection):
    """
    :mod:`threading` implementation of a WebSocket server connection.

    :class:`ServerConnection` provides :meth:`recv` and :meth:`send` methods for
    receiving and sending messages.

    It supports iteration to receive messages::

        for message in websocket:
            process(message)

    The iterator exits normally when the connection is closed with close code
    1000 (OK) or 1001 (going away) or without a close code. It raises a
    :exc:`~websockets.exceptions.ConnectionClosedError` when the connection is
    closed with any other code.

    The ``close_timeout`` and ``max_queue`` arguments have the same meaning as
    in :func:`serve`.

    Args:
        socket: Socket connected to a WebSocket client.
        protocol: Sans-I/O connection.

    """

    def __init__(
        self,
        socket: socket.socket,
        protocol: ServerProtocol,
        *,
        close_timeout: float | None = 10,
        max_queue: int | None | tuple[int | None, int | None] = 16,
    ) -> None:
        self.protocol: ServerProtocol
        # Set by process_event() once the handshake request is received.
        self.request_rcvd = threading.Event()
        super().__init__(
            socket,
            protocol,
            close_timeout=close_timeout,
            max_queue=max_queue,
        )
        self.username: str  # see basic_auth()

    def respond(self, status: StatusLike, text: str) -> Response:
        """
        Create a plain text HTTP response.

        ``process_request`` and ``process_response`` may call this method to
        return an HTTP response instead of performing the WebSocket opening
        handshake.

        You can modify the response before returning it, for example by changing
        HTTP headers.

        Args:
            status: HTTP status code.
            text: HTTP response body; it will be encoded to UTF-8.

        Returns:
            HTTP response to send to the client.

        """
        return self.protocol.reject(status, text)

    def handshake(
        self,
        process_request: (
            Callable[
                [ServerConnection, Request],
                Response | None,
            ]
            | None
        ) = None,
        process_response: (
            Callable[
                [ServerConnection, Request, Response],
                Response | None,
            ]
            | None
        ) = None,
        server_header: str | None = SERVER,
        timeout: float | None = None,
    ) -> None:
        """
        Perform the opening handshake.

        Args:
            process_request: Intercept the handshake request; returning a
                response rejects the handshake.
            process_response: Intercept or replace the handshake response.
            server_header: Value of the ``Server`` response header;
                :obj:`None` omits the header.
            timeout: Time to wait for the handshake request, in seconds.

        Raises:
            TimeoutError: If the handshake request isn't received in time.

        """
        # Wait for recv_events() to deliver the handshake request.
        if not self.request_rcvd.wait(timeout):
            raise TimeoutError("timed out during handshake")

        # self.request is None when the connection was lost before receiving
        # a request; in that case there is nothing to respond to.
        if self.request is not None:
            with self.send_context(expected_state=CONNECTING):
                response = None

                if process_request is not None:
                    try:
                        response = process_request(self, self.request)
                    except Exception as exc:
                        # Record the failure and reject the handshake with a
                        # generic 500 so internals aren't leaked to the client.
                        self.protocol.handshake_exc = exc
                        response = self.protocol.reject(
                            http.HTTPStatus.INTERNAL_SERVER_ERROR,
                            (
                                "Failed to open a WebSocket connection.\n"
                                "See server log for more information.\n"
                            ),
                        )

                if response is None:
                    self.response = self.protocol.accept(self.request)
                else:
                    self.response = response

                if server_header:
                    self.response.headers["Server"] = server_header

                response = None

                if process_response is not None:
                    try:
                        response = process_response(self, self.request, self.response)
                    except Exception as exc:
                        # Same policy as for process_request failures.
                        self.protocol.handshake_exc = exc
                        response = self.protocol.reject(
                            http.HTTPStatus.INTERNAL_SERVER_ERROR,
                            (
                                "Failed to open a WebSocket connection.\n"
                                "See server log for more information.\n"
                            ),
                        )

                if response is not None:
                    self.response = response

                self.protocol.send_response(self.response)

        # self.protocol.handshake_exc is set when the connection is lost before
        # receiving a request, when the request cannot be parsed, or when the
        # handshake fails, including when process_request or process_response
        # raises an exception.

        # It isn't set when process_request or process_response sends an HTTP
        # response that rejects the handshake.

        if self.protocol.handshake_exc is not None:
            raise self.protocol.handshake_exc

    def process_event(self, event: Event) -> None:
        """
        Process one incoming event.

        """
        # First event - handshake request.
        if self.request is None:
            assert isinstance(event, Request)
            self.request = event
            self.request_rcvd.set()
        # Later events - frames.
        else:
            super().process_event(event)

    def recv_events(self) -> None:
        """
        Read incoming data from the socket and process events.

        """
        try:
            super().recv_events()
        finally:
            # If the connection is closed during the handshake, unblock it.
            self.request_rcvd.set()
class Server:
    """
    WebSocket server returned by :func:`serve`.

    This class mirrors the API of :class:`~socketserver.BaseServer`, notably the
    :meth:`~socketserver.BaseServer.serve_forever` and
    :meth:`~socketserver.BaseServer.shutdown` methods, as well as the context
    manager protocol.

    Args:
        socket: Server socket listening for new connections.
        handler: Handler for one connection. Receives the socket and address
            returned by :meth:`~socket.socket.accept`.
        logger: Logger for this server.
            It defaults to ``logging.getLogger("websockets.server")``.
            See the :doc:`logging guide <../../topics/logging>` for details.

    """

    def __init__(
        self,
        socket: socket.socket,
        handler: Callable[[socket.socket, Any], None],
        logger: LoggerLike | None = None,
    ) -> None:
        self.socket = socket
        self.handler = handler
        if logger is None:
            logger = logging.getLogger("websockets.server")
        self.logger = logger
        # Pipe used by shutdown() to wake up the selector in serve_forever();
        # on Windows, shutdown() relies on closing the socket instead.
        if sys.platform != "win32":
            self.shutdown_watcher, self.shutdown_notifier = os.pipe()

    def serve_forever(self) -> None:
        """
        See :meth:`socketserver.BaseServer.serve_forever`.

        This method doesn't return. Calling :meth:`shutdown` from another thread
        stops the server.

        Typical use::

            with serve(...) as server:
                server.serve_forever()

        """
        poller = selectors.DefaultSelector()
        try:
            poller.register(self.socket, selectors.EVENT_READ)
        except ValueError:  # pragma: no cover
            # If shutdown() is called before poller.register(),
            # the socket is closed and poller.register() raises
            # ValueError: Invalid file descriptor: -1
            return
        if sys.platform != "win32":
            # Watch the shutdown pipe so shutdown() can interrupt select().
            poller.register(self.shutdown_watcher, selectors.EVENT_READ)

        while True:
            poller.select()
            try:
                # If the socket is closed, this will raise an exception and exit
                # the loop. So we don't need to check the return value of select().
                sock, addr = self.socket.accept()
            except OSError:
                break
            # Since there isn't a mechanism for tracking connections and waiting
            # for them to terminate, we cannot use daemon threads, or else all
            # connections would be terminated brutally when closing the server.
            thread = threading.Thread(target=self.handler, args=(sock, addr))
            thread.start()

    def shutdown(self) -> None:
        """
        See :meth:`socketserver.BaseServer.shutdown`.

        """
        # Closing the socket makes accept() raise OSError in serve_forever().
        self.socket.close()
        # Writing to the pipe wakes up the selector immediately.
        if sys.platform != "win32":
            os.write(self.shutdown_notifier, b"x")

    def fileno(self) -> int:
        """
        See :meth:`socketserver.BaseServer.fileno`.

        """
        return self.socket.fileno()

    def __enter__(self) -> Server:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        self.shutdown()
def __getattr__(name: str) -> Any:
    """Resolve deprecated module-level aliases (PEP 562)."""
    if name != "WebSocketServer":
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    # deprecated in 13.0 - 2024-08-20
    warnings.warn(
        "WebSocketServer was renamed to Server",
        DeprecationWarning,
    )
    return Server
def serve(
    handler: Callable[[ServerConnection], None],
    host: str | None = None,
    port: int | None = None,
    *,
    # TCP/TLS
    sock: socket.socket | None = None,
    ssl: ssl_module.SSLContext | None = None,
    # WebSocket
    origins: Sequence[Origin | None] | None = None,
    extensions: Sequence[ServerExtensionFactory] | None = None,
    subprotocols: Sequence[Subprotocol] | None = None,
    select_subprotocol: (
        Callable[
            [ServerConnection, Sequence[Subprotocol]],
            Subprotocol | None,
        ]
        | None
    ) = None,
    process_request: (
        Callable[
            [ServerConnection, Request],
            Response | None,
        ]
        | None
    ) = None,
    process_response: (
        Callable[
            [ServerConnection, Request, Response],
            Response | None,
        ]
        | None
    ) = None,
    server_header: str | None = SERVER,
    compression: str | None = "deflate",
    # Timeouts
    open_timeout: float | None = 10,
    close_timeout: float | None = 10,
    # Limits
    max_size: int | None = 2**20,
    max_queue: int | None | tuple[int | None, int | None] = 16,
    # Logging
    logger: LoggerLike | None = None,
    # Escape hatch for advanced customization
    create_connection: type[ServerConnection] | None = None,
    **kwargs: Any,
) -> Server:
    """
    Create a WebSocket server listening on ``host`` and ``port``.

    Whenever a client connects, the server creates a :class:`ServerConnection`,
    performs the opening handshake, and delegates to the ``handler``.

    The handler receives the :class:`ServerConnection` instance, which you can
    use to send and receive messages.

    Once the handler completes, either normally or with an exception, the server
    performs the closing handshake and closes the connection.

    This function returns a :class:`Server` whose API mirrors
    :class:`~socketserver.BaseServer`. Treat it as a context manager to ensure
    that it will be closed and call :meth:`~Server.serve_forever` to serve
    requests::

        from websockets.sync.server import serve

        def handler(websocket):
            ...

        with serve(handler, ...) as server:
            server.serve_forever()

    Args:
        handler: Connection handler. It receives the WebSocket connection,
            which is a :class:`ServerConnection`, in argument.
        host: Network interfaces the server binds to.
            See :func:`~socket.create_server` for details.
        port: TCP port the server listens on.
            See :func:`~socket.create_server` for details.
        sock: Preexisting TCP socket. ``sock`` replaces ``host`` and ``port``.
            You may call :func:`socket.create_server` to create a suitable TCP
            socket.
        ssl: Configuration for enabling TLS on the connection.
        origins: Acceptable values of the ``Origin`` header, for defending
            against Cross-Site WebSocket Hijacking attacks. Include :obj:`None`
            in the list if the lack of an origin is acceptable.
        extensions: List of supported extensions, in order in which they
            should be negotiated and run.
        subprotocols: List of supported subprotocols, in order of decreasing
            preference.
        select_subprotocol: Callback for selecting a subprotocol among
            those supported by the client and the server. It receives a
            :class:`ServerConnection` (not a
            :class:`~websockets.server.ServerProtocol`!) instance and a list of
            subprotocols offered by the client. Other than the first argument,
            it has the same behavior as the
            :meth:`ServerProtocol.select_subprotocol
            <websockets.server.ServerProtocol.select_subprotocol>` method.
        process_request: Intercept the request during the opening handshake.
            Return an HTTP response to force the response. Return :obj:`None` to
            continue normally. When you force an HTTP 101 Continue response, the
            handshake is successful. Else, the connection is aborted.
        process_response: Intercept the response during the opening handshake.
            Modify the response or return a new HTTP response to force the
            response. Return :obj:`None` to continue normally. When you force an
            HTTP 101 Continue response, the handshake is successful. Else, the
            connection is aborted.
        server_header: Value of the ``Server`` response header.
            It defaults to ``"Python/x.y.z websockets/X.Y"``. Setting it to
            :obj:`None` removes the header.
        compression: The "permessage-deflate" extension is enabled by default.
            Set ``compression`` to :obj:`None` to disable it. See the
            :doc:`compression guide <../../topics/compression>` for details.
        open_timeout: Timeout for opening connections in seconds.
            :obj:`None` disables the timeout.
        close_timeout: Timeout for closing connections in seconds.
            :obj:`None` disables the timeout.
        max_size: Maximum size of incoming messages in bytes.
            :obj:`None` disables the limit.
        max_queue: High-water mark of the buffer where frames are received.
            It defaults to 16 frames. The low-water mark defaults to ``max_queue
            // 4``. You may pass a ``(high, low)`` tuple to set the high-water
            and low-water marks. If you want to disable flow control entirely,
            you may set it to ``None``, although that's a bad idea.
        logger: Logger for this server.
            It defaults to ``logging.getLogger("websockets.server")``. See the
            :doc:`logging guide <../../topics/logging>` for details.
        create_connection: Factory for the :class:`ServerConnection` managing
            the connection. Set it to a wrapper or a subclass to customize
            connection handling.

    Any other keyword arguments are passed to :func:`~socket.create_server`.

    """

    # Process parameters

    # Backwards compatibility: ssl used to be called ssl_context.
    if ssl is None and "ssl_context" in kwargs:
        ssl = kwargs.pop("ssl_context")
        warnings.warn(  # deprecated in 13.0 - 2024-08-20
            "ssl_context was renamed to ssl",
            DeprecationWarning,
        )

    if subprotocols is not None:
        validate_subprotocols(subprotocols)

    if compression == "deflate":
        extensions = enable_server_permessage_deflate(extensions)
    elif compression is not None:
        raise ValueError(f"unsupported compression: {compression}")

    if create_connection is None:
        create_connection = ServerConnection

    # Bind socket and listen

    # Private APIs for unix_serve()
    unix: bool = kwargs.pop("unix", False)
    path: str | None = kwargs.pop("path", None)

    if sock is None:
        if unix:
            if path is None:
                raise ValueError("missing path argument")
            kwargs.setdefault("family", socket.AF_UNIX)
            sock = socket.create_server(path, **kwargs)
        else:
            sock = socket.create_server((host, port), **kwargs)
    else:
        if path is not None:
            raise ValueError("path and sock arguments are incompatible")

    # Initialize TLS wrapper

    if ssl is not None:
        sock = ssl.wrap_socket(
            sock,
            server_side=True,
            # Delay TLS handshake until after we set a timeout on the socket.
            do_handshake_on_connect=False,
        )

    # Define request handler

    def conn_handler(sock: socket.socket, addr: Any) -> None:
        # Runs in a dedicated thread per connection; see Server.serve_forever().

        # Calculate timeouts on the TLS and WebSocket handshakes.
        # The TLS timeout must be set on the socket, then removed
        # to avoid conflicting with the WebSocket timeout in handshake().
        deadline = Deadline(open_timeout)

        try:
            # Disable Nagle algorithm

            if not unix:
                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)

            # Perform TLS handshake

            if ssl is not None:
                sock.settimeout(deadline.timeout())
                # mypy cannot figure this out
                assert isinstance(sock, ssl_module.SSLSocket)
                sock.do_handshake()
                sock.settimeout(None)

            # Create a closure to give select_subprotocol access to connection.
            protocol_select_subprotocol: (
                Callable[
                    [ServerProtocol, Sequence[Subprotocol]],
                    Subprotocol | None,
                ]
                | None
            ) = None
            if select_subprotocol is not None:

                def protocol_select_subprotocol(
                    protocol: ServerProtocol,
                    subprotocols: Sequence[Subprotocol],
                ) -> Subprotocol | None:
                    # mypy doesn't know that select_subprotocol is immutable.
                    assert select_subprotocol is not None
                    # Ensure this function is only used in the intended context.
                    assert protocol is connection.protocol
                    return select_subprotocol(connection, subprotocols)

            # Initialize WebSocket protocol

            protocol = ServerProtocol(
                origins=origins,
                extensions=extensions,
                subprotocols=subprotocols,
                select_subprotocol=protocol_select_subprotocol,
                max_size=max_size,
                logger=logger,
            )

            # Initialize WebSocket connection

            assert create_connection is not None  # help mypy
            connection = create_connection(
                sock,
                protocol,
                close_timeout=close_timeout,
                max_queue=max_queue,
            )
        except Exception:
            # Setup failed before the connection was created; nothing to
            # report to the client, just drop the socket.
            sock.close()
            return

        try:
            try:
                connection.handshake(
                    process_request,
                    process_response,
                    server_header,
                    deadline.timeout(),
                )
            except TimeoutError:
                connection.close_socket()
                connection.recv_events_thread.join()
                return
            except Exception:
                connection.logger.error("opening handshake failed", exc_info=True)
                connection.close_socket()
                connection.recv_events_thread.join()
                return

            assert connection.protocol.state is OPEN
            try:
                handler(connection)
            except Exception:
                connection.logger.error("connection handler failed", exc_info=True)
                connection.close(CloseCode.INTERNAL_ERROR)
            else:
                connection.close()

        except Exception:  # pragma: no cover
            # Don't leak sockets on unexpected errors.
            sock.close()

    # Initialize server

    return Server(sock, conn_handler, logger)
def unix_serve(
    handler: Callable[[ServerConnection], None],
    path: str | None = None,
    **kwargs: Any,
) -> Server:
    """
    Create a WebSocket server listening on a Unix socket.

    This function accepts the same keyword arguments as :func:`serve`.

    It's only available on Unix.

    It's useful for deploying a server behind a reverse proxy such as nginx.

    Args:
        handler: Connection handler. It receives the WebSocket connection,
            which is a :class:`ServerConnection`, in argument.
        path: File system path to the Unix socket.

    """
    # Delegate to serve(); unix and path are private parameters of serve().
    return serve(handler, unix=True, path=path, **kwargs)
def is_credentials(credentials: Any) -> bool:
    """Tell whether ``credentials`` is a ``(username, password)`` pair of strings."""
    try:
        # Any 2-item iterable unpacks; anything else raises.
        username, password = credentials
    except (TypeError, ValueError):
        return False
    return isinstance(username, str) and isinstance(password, str)
def basic_auth(
    realm: str = "",
    credentials: tuple[str, str] | Iterable[tuple[str, str]] | None = None,
    check_credentials: Callable[[str, str], bool] | None = None,
) -> Callable[[ServerConnection, Request], Response | None]:
    """
    Factory for ``process_request`` to enforce HTTP Basic Authentication.

    :func:`basic_auth` is designed to integrate with :func:`serve` as follows::

        from websockets.sync.server import basic_auth, serve

        with serve(
            ...,
            process_request=basic_auth(
                realm="my dev server",
                credentials=("hello", "iloveyou"),
            ),
        ):

    If authentication succeeds, the connection's ``username`` attribute is set.
    If it fails, the server responds with an HTTP 401 Unauthorized status.

    One of ``credentials`` or ``check_credentials`` must be provided; not both.

    Args:
        realm: Scope of protection. It should contain only ASCII characters
            because the encoding of non-ASCII characters is undefined. Refer to
            section 2.2 of :rfc:`7235` for details.
        credentials: Hard coded authorized credentials. It can be a
            ``(username, password)`` pair or a list of such pairs.
        check_credentials: Function that verifies credentials.
            It receives ``username`` and ``password`` arguments and returns
            whether they're valid.
    Raises:
        TypeError: If ``credentials`` or ``check_credentials`` is wrong.
        ValueError: If ``credentials`` and ``check_credentials`` are both
            provided or both not provided.

    """
    # Exactly one of the two authentication mechanisms must be given.
    if (credentials is None) == (check_credentials is None):
        raise ValueError("provide either credentials or check_credentials")
    if credentials is not None:
        if is_credentials(credentials):
            # A single (username, password) pair.
            credentials_list = [cast(tuple[str, str], credentials)]
        elif isinstance(credentials, Iterable):
            # A collection of (username, password) pairs.
            credentials_list = list(cast(Iterable[tuple[str, str]], credentials))
            if not all(is_credentials(item) for item in credentials_list):
                raise TypeError(f"invalid credentials argument: {credentials}")
        else:
            raise TypeError(f"invalid credentials argument: {credentials}")
        credentials_dict = dict(credentials_list)

        def check_credentials(username: str, password: str) -> bool:
            try:
                expected_password = credentials_dict[username]
            except KeyError:
                return False
            # Constant-time comparison mitigates timing attacks on passwords.
            return hmac.compare_digest(expected_password, password)

    assert check_credentials is not None  # help mypy

    def unauthorized(connection: ServerConnection, body: str) -> Response:
        # Build a 401 response carrying the Basic challenge for ``realm``.
        response = connection.respond(http.HTTPStatus.UNAUTHORIZED, body)
        response.headers["WWW-Authenticate"] = build_www_authenticate_basic(realm)
        return response

    def process_request(
        connection: ServerConnection,
        request: Request,
    ) -> Response | None:
        """
        Perform HTTP Basic Authentication.

        If it succeeds, set the connection's ``username`` attribute and return
        :obj:`None`. If it fails, return an HTTP 401 Unauthorized response.

        """
        try:
            authorization = request.headers["Authorization"]
        except KeyError:
            return unauthorized(connection, "Missing credentials\n")
        try:
            username, password = parse_authorization_basic(authorization)
        except InvalidHeader:
            return unauthorized(connection, "Unsupported credentials\n")
        if not check_credentials(username, password):
            return unauthorized(connection, "Invalid credentials\n")
        # Authentication succeeded; record who connected.
        connection.username = username
        return None

    return process_request

View File

@ -0,0 +1,45 @@
from __future__ import annotations
import time
__all__ = ["Deadline"]
class Deadline:
"""
Manage timeouts across multiple steps.
Args:
timeout: Time available in seconds or :obj:`None` if there is no limit.
"""
def __init__(self, timeout: float | None) -> None:
self.deadline: float | None
if timeout is None:
self.deadline = None
else:
self.deadline = time.monotonic() + timeout
def timeout(self, *, raise_if_elapsed: bool = True) -> float | None:
"""
Calculate a timeout from a deadline.
Args:
raise_if_elapsed: Whether to raise :exc:`TimeoutError`
if the deadline lapsed.
Raises:
TimeoutError: If the deadline lapsed.
Returns:
Time left in seconds or :obj:`None` if there is no limit.
"""
if self.deadline is None:
return None
timeout = self.deadline - time.monotonic()
if raise_if_elapsed and timeout <= 0:
raise TimeoutError("timed out")
return timeout