diff --git a/pyproject.toml b/pyproject.toml index 5e9f7f0ca..6868919d2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,6 +36,20 @@ requests-kerberos = {version = "^0.15.0", optional = true} [tool.poetry.extras] pyarrow = ["pyarrow"] +# `[kernel]` extra is intentionally not declared here yet. +# `databricks-sql-kernel` is built from the databricks-sql-kernel +# repo and not yet published to PyPI; declaring it as a poetry dep +# breaks `poetry lock` for every CI job. Once the wheel is on PyPI +# the extra will be added back here: +# +# databricks-sql-kernel = {version = "^0.1.0", optional = true} +# [tool.poetry.extras] +# kernel = ["databricks-sql-kernel"] +# +# Until then, install the kernel separately: +# pip install databricks-sql-kernel +# or (local dev): +# cd databricks-sql-kernel/pyo3 && maturin develop --release [tool.poetry.group.dev.dependencies] pytest = "^7.1.2" diff --git a/src/databricks/sql/backend/kernel/__init__.py b/src/databricks/sql/backend/kernel/__init__.py new file mode 100644 index 000000000..230af47f2 --- /dev/null +++ b/src/databricks/sql/backend/kernel/__init__.py @@ -0,0 +1,25 @@ +"""Backend that delegates to the Databricks SQL Kernel (Rust) via PyO3. + +Routed when ``use_kernel=True`` is passed to ``databricks.sql.connect``. +The module's identity is "delegates to the kernel" — not the wire +protocol the kernel happens to use today (SEA REST). The kernel may +switch its default transport (SEA REST → SEA gRPC → …) without +renaming this module. + +This ``__init__`` deliberately does **not** re-export +``KernelDatabricksClient`` from ``.client``. Importing ``.client`` +loads the ``databricks_sql_kernel`` PyO3 extension at module-import +time; doing that eagerly here would make ``import +databricks.sql.backend.kernel.type_mapping`` (used by tests / by +``KernelResultSet`` consumers) require the kernel wheel even when +the caller never plans to open a kernel-backed session. Callers +that need the client import it directly: + + from databricks.sql.backend.kernel.client import KernelDatabricksClient + +``session.py::_create_backend`` already does this lazy import under +the ``use_kernel=True`` branch. + +See ``docs/designs/pysql-kernel-integration.md`` in +``databricks-sql-kernel`` for the full integration design. +""" diff --git a/src/databricks/sql/backend/kernel/auth_bridge.py b/src/databricks/sql/backend/kernel/auth_bridge.py new file mode 100644 index 000000000..01123b96c --- /dev/null +++ b/src/databricks/sql/backend/kernel/auth_bridge.py @@ -0,0 +1,111 @@ +"""Translate the connector's ``AuthProvider`` into ``databricks_sql_kernel`` +``Session`` auth kwargs. + +This phase ships PAT only. The kernel-side PyO3 binding accepts +``auth_type='pat'``; OAuth / federation / custom credentials +providers are reserved but not yet wired in either layer. Non-PAT +auth raises ``NotSupportedError`` from this bridge so the failure +surfaces at session-open time with a clear message rather than +deep inside the kernel. + +Token extraction goes through ``AuthProvider.add_headers({})`` +rather than touching auth-provider-specific attributes, so the +bridge works uniformly for every PAT shape — including +``AccessTokenAuthProvider`` wrapped in ``TokenFederationProvider`` +(which ``get_python_sql_connector_auth_provider`` does for every +provider it builds). 
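+
+A minimal usage sketch (the provider and token value below are
+illustrative, not real credentials)::
+
+    provider = AccessTokenAuthProvider("dapi-example-token")
+    kernel_auth_kwargs(provider)
+    # -> {"auth_type": "pat", "access_token": "dapi-example-token"}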
+""" + +from __future__ import annotations + +import logging +import re +from typing import Any, Dict, Optional + +from databricks.sql.auth.authenticators import AccessTokenAuthProvider, AuthProvider +from databricks.sql.auth.token_federation import TokenFederationProvider +from databricks.sql.exc import NotSupportedError + +logger = logging.getLogger(__name__) + + +_BEARER_PREFIX = "Bearer " + +# Defense-in-depth: reject tokens containing ASCII control characters. +# A token with embedded CR/LF/NUL would let a misbehaving HTTP stack +# split or terminate the Authorization header line, opening a header- +# injection sink. Real PATs and federation-exchanged tokens never +# contain these. +_CONTROL_CHAR_RE = re.compile(r"[\x00-\x1f\x7f]") + + +def _is_pat(auth_provider: AuthProvider) -> bool: + """Return True iff this provider ultimately wraps an + ``AccessTokenAuthProvider``. + + ``get_python_sql_connector_auth_provider`` always wraps the + base provider in a ``TokenFederationProvider``, so an + ``isinstance`` check against ``AccessTokenAuthProvider`` alone + never matches in practice. We peek through the federation + wrapper to find the real type. + """ + if isinstance(auth_provider, AccessTokenAuthProvider): + return True + if isinstance(auth_provider, TokenFederationProvider) and isinstance( + auth_provider.external_provider, AccessTokenAuthProvider + ): + return True + return False + + +def _extract_bearer_token(auth_provider: AuthProvider) -> Optional[str]: + """Pull the current bearer token out of an ``AuthProvider``. + + The connector's ``AuthProvider.add_headers`` mutates a header + dict and writes the ``Authorization: Bearer `` value. + Going through that public surface keeps us insulated from + provider-specific internals. + + Returns ``None`` if the provider did not write an Authorization + header or wrote a non-Bearer scheme — neither is representable + in the kernel's PAT auth surface. + """ + headers: Dict[str, str] = {} + auth_provider.add_headers(headers) + auth = headers.get("Authorization") + if not auth: + return None + if not auth.startswith(_BEARER_PREFIX): + return None + token = auth[len(_BEARER_PREFIX) :] + if _CONTROL_CHAR_RE.search(token): + raise ValueError( + "Bearer token contains ASCII control characters; refusing to " + "forward it to the kernel auth bridge." + ) + return token + + +def kernel_auth_kwargs(auth_provider: AuthProvider) -> Dict[str, Any]: + """Build the kwargs passed to ``databricks_sql_kernel.Session(...)``. + + PAT (including ``TokenFederationProvider``-wrapped PAT) routes + through the kernel's PAT path. Anything else raises + ``NotSupportedError`` — the kernel binding doesn't accept OAuth + today, and routing OAuth through PAT would silently break + token refresh during long-running sessions. + """ + if _is_pat(auth_provider): + token = _extract_bearer_token(auth_provider) + if not token: + raise ValueError( + "PAT auth provider did not produce a Bearer Authorization " + "header; cannot route through the kernel's PAT path" + ) + return {"auth_type": "pat", "access_token": token} + + raise NotSupportedError( + f"The kernel backend (use_kernel=True) currently only supports PAT auth, " + f"but got {type(auth_provider).__name__}. Use the Thrift backend " + "(default) for OAuth / federation / custom credential providers." 
+ ) diff --git a/src/databricks/sql/backend/kernel/client.py b/src/databricks/sql/backend/kernel/client.py new file mode 100644 index 000000000..2bc70c618 --- /dev/null +++ b/src/databricks/sql/backend/kernel/client.py @@ -0,0 +1,581 @@ +"""``DatabricksClient`` backed by the Rust kernel via PyO3. + +Routed when ``use_kernel=True``. Constructor takes the connector's +already-built ``auth_provider`` and forwards everything else to the +kernel's ``Session``. Every kernel call goes through this thin +wrapper; this module is the single seam between the connector's +``DatabricksClient`` contract and the kernel's Python surface. + +Errors map cleanly: ``KernelError`` from the kernel is inspected +for its ``code`` attribute and re-raised as the appropriate PEP +249 exception (``DatabaseError``, ``OperationalError``, +``ProgrammingError``, etc.). Connector callers see standard +exception types, never the underlying kernel error. + +Phase 1 gaps documented in the integration design: + +- Parameter binding (``parameters=[TSparkParameter, ...]``) is not + yet supported — the PyO3 ``Statement`` doesn't expose + ``bind_param``. ``execute_command(parameters=[...])`` raises + ``NotSupportedError``. +- ``query_tags`` on execute is not supported (kernel exposes + ``statement_conf`` but PyO3 doesn't surface it). +- ``get_tables`` with a non-empty ``table_types`` filter applies + the filter client-side; today the kernel returns the full + ``SHOW TABLES`` shape unchanged. The connector's existing + ``ResultSetFilter.filter_tables_by_type`` is keyed on + ``SeaResultSet`` not ``KernelResultSet``, so we punt and let + the caller see all rows — documented as a known gap in the + design doc. +- Volume PUT/GET (staging operations): kernel has no Volume API + yet. Users on Thrift-only paths. +""" + +from __future__ import annotations + +import logging +import threading +import uuid +from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union + +from databricks.sql.backend.databricks_client import DatabricksClient +from databricks.sql.backend.kernel.auth_bridge import kernel_auth_kwargs +from databricks.sql.backend.kernel.result_set import KernelResultSet +from databricks.sql.backend.types import ( + BackendType, + CommandId, + CommandState, + SessionId, +) +from databricks.sql.exc import ( + DatabaseError, + Error, + InterfaceError, + NotSupportedError, + OperationalError, + ProgrammingError, +) +from databricks.sql.thrift_api.TCLIService import ttypes + +if TYPE_CHECKING: + from databricks.sql.client import Cursor + from databricks.sql.result_set import ResultSet + +logger = logging.getLogger(__name__) + + +try: + import databricks_sql_kernel as _kernel # type: ignore[import-not-found] +except ImportError as exc: # pragma: no cover - import-time error surfaces clearly + # The `databricks-sql-kernel` wheel is not yet on PyPI, so we + # don't yet declare it as an optional extra in pyproject.toml + # (doing so breaks `poetry lock`). Once published the install + # hint will move to `pip install 'databricks-sql-connector[kernel]'`. + raise ImportError( + "use_kernel=True requires the databricks-sql-kernel package. Install it with:\n" + " pip install databricks-sql-kernel\n" + "or for local development from the kernel repo:\n" + " cd databricks-sql-kernel/pyo3 && maturin develop --release" + ) from exc + + +# ─── Error mapping ────────────────────────────────────────────────────────── + + +# Map a kernel `code` slug to the PEP 249 exception class that best +# captures it. 
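+#
+# Caller-side sketch (illustrative; ``schedule_retry`` is hypothetical,
+# and the attribute names are the ones ``_reraise_kernel_error`` forwards
+# below):
+#
+#     try:
+#         cursor.execute(sql)
+#     except OperationalError as e:
+#         if getattr(e, "retryable", False):
+#             schedule_retry(e.query_id)
+#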
The match isn't a perfect 1:1 — PEP 249 has a +# narrower taxonomy than the kernel — so several kernel codes +# collapse onto the same Python exception. This table is the only +# place that mapping lives. +_CODE_TO_EXCEPTION = { + "InvalidArgument": ProgrammingError, + "Unauthenticated": OperationalError, + "PermissionDenied": OperationalError, + "NotFound": ProgrammingError, + "ResourceExhausted": OperationalError, + "Unavailable": OperationalError, + "Timeout": OperationalError, + "Cancelled": OperationalError, + "DataLoss": DatabaseError, + "Internal": DatabaseError, + "InvalidStatementHandle": ProgrammingError, + "NetworkError": OperationalError, + "SqlError": DatabaseError, + "Unknown": DatabaseError, +} + + +def _reraise_kernel_error(exc: "_kernel.KernelError") -> "Error": + """Convert a ``databricks_sql_kernel.KernelError`` to a PEP 249 + exception. + + Kernel errors carry their structured attrs (``code``, + ``message``, ``sql_state``, ``error_code``, ``query_id`` …) as + plain attributes — we copy them onto the re-raised exception so + callers can branch on them without reaching back through + ``__cause__``. + """ + code = getattr(exc, "code", "Unknown") + cls = _CODE_TO_EXCEPTION.get(code, DatabaseError) + new = cls(getattr(exc, "message", str(exc))) + # Forward the structured fields so connector users can read + # err.sql_state / err.query_id / etc. without a type-switch. + for attr in ( + "code", + "sql_state", + "error_code", + "vendor_code", + "http_status", + "retryable", + "query_id", + ): + setattr(new, attr, getattr(exc, attr, None)) + new.__cause__ = exc + return new + + +# ─── Client ───────────────────────────────────────────────────────────────── + + +class KernelDatabricksClient(DatabricksClient): + """``DatabricksClient`` that delegates to the Rust kernel. + + Owns one ``databricks_sql_kernel.Session`` per ``open_session`` + call. Async-execute handles (from ``submit()``) live in a dict + keyed on ``CommandId`` so the connector's polling APIs + (``get_query_state`` / ``get_execution_result`` / + ``cancel_command`` / ``close_command``) can find them again. + """ + + def __init__( + self, + server_hostname: str, + http_path: str, + auth_provider, + ssl_options, + catalog: Optional[str] = None, + schema: Optional[str] = None, + http_headers=None, + http_client=None, + **kwargs, + ): + # The connector hands us several fields the kernel doesn't + # consume directly (ssl_options, http_headers, http_client, + # port). Kernel manages its own HTTP stack so we + # accept-and-ignore. + self._server_hostname = server_hostname + self._http_path = http_path + self._auth_provider = auth_provider + self._catalog = catalog + self._schema = schema + self._auth_kwargs = kernel_auth_kwargs(auth_provider) + # Open ``databricks_sql_kernel.Session`` lazily in + # ``open_session`` so the Session lifecycle gates the + # underlying connection setup — same shape as Thrift's + # ``TOpenSession``. + self._kernel_session: Optional[Any] = None + self._session_id: Optional[SessionId] = None + # Async-exec handles keyed by CommandId.guid. Populated by + # ``execute_command(async_op=True)``; drained by ``close_command``. + # Guarded by ``_async_handles_lock`` so concurrent cursors on the + # same connection don't race on submit / close / close-session. 
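+        # Shape (illustrative): {"<sea statement guid>": ExecutedAsyncStatement}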
+ self._async_handles: Dict[str, Any] = {} + self._async_handles_lock = threading.RLock() + + # ── Session lifecycle ────────────────────────────────────────── + + def open_session( + self, + session_configuration: Optional[Dict[str, Any]], + catalog: Optional[str], + schema: Optional[str], + ) -> SessionId: + if self._kernel_session is not None: + raise InterfaceError("KernelDatabricksClient already has an open session.") + # ``session_configuration`` flows through to the kernel's + # ``session_conf`` map verbatim; the SEA endpoint enforces + # its own allow-list and rejects unknown keys. + session_conf: Optional[Dict[str, str]] = None + if session_configuration: + session_conf = {k: str(v) for k, v in session_configuration.items()} + try: + self._kernel_session = _kernel.Session( + host=self._server_hostname, + http_path=self._http_path, + catalog=catalog or self._catalog, + schema=schema or self._schema, + session_conf=session_conf, + **self._auth_kwargs, + ) + except _kernel.KernelError as exc: + raise _reraise_kernel_error(exc) + finally: + # Drop the raw access token from the instance once the + # kernel session is constructed (or failed). The kernel + # owns the credential from this point on; keeping a + # cleartext copy on a long-lived connector object risks + # accidental capture by pickling / debuggers / telemetry. + self._auth_kwargs.pop("access_token", None) + + # Use the kernel's real server-issued session id, not a + # synthetic UUID. Matches what the native SEA backend does. + # Bind to a local first so mypy sees a non-Optional return. + session_id = SessionId.from_sea_session_id(self._kernel_session.session_id) + self._session_id = session_id + logger.info("Opened kernel-backed session %s", session_id) + return session_id + + def close_session(self, session_id: SessionId) -> None: + if self._kernel_session is None: + return + # Close any tracked async handles first so they fire their + # server-side CloseStatement before the session goes away. + with self._async_handles_lock: + handles_to_close = list(self._async_handles.values()) + self._async_handles.clear() + for handle in handles_to_close: + try: + handle.close() + except _kernel.KernelError as exc: + logger.warning( + "Error closing async handle during session close: %s", exc + ) + try: + self._kernel_session.close() + except _kernel.KernelError as exc: + # Surface as a non-fatal warning — the kernel's Drop + # impl will retry the close fire-and-forget. PEP 249 + # discourages raising from connection.close(). + logger.warning("Error closing kernel session: %s", exc) + self._kernel_session = None + self._session_id = None + + # ── Query execution ──────────────────────────────────────────── + + def execute_command( + self, + operation: str, + session_id: SessionId, + max_rows: int, + max_bytes: int, + lz4_compression: bool, + cursor: "Cursor", + use_cloud_fetch: bool, + parameters: List[ttypes.TSparkParameter], + async_op: bool, + enforce_embedded_schema_correctness: bool, + row_limit: Optional[int] = None, + query_tags: Optional[Dict[str, Optional[str]]] = None, + ) -> Union["ResultSet", None]: + if self._kernel_session is None: + raise InterfaceError("Cannot execute_command without an open session.") + if parameters: + raise NotSupportedError( + "Parameter binding is not yet supported on the kernel backend " + "(PyO3 Statement.bind_param lands in a follow-up PR)." + ) + if query_tags: + raise NotSupportedError( + "Statement-level query_tags are not yet supported on the kernel backend." 
+ ) + + stmt = self._kernel_session.statement() + try: + stmt.set_sql(operation) + if async_op: + async_exec = stmt.submit() + command_id = CommandId.from_sea_statement_id(async_exec.statement_id) + cursor.active_command_id = command_id + with self._async_handles_lock: + self._async_handles[command_id.guid] = async_exec + return None + executed = stmt.execute() + except _kernel.KernelError as exc: + raise _reraise_kernel_error(exc) + finally: + # ``Statement`` is a lifecycle owner separate from the + # executed handle it produces. Drop it here so the + # parent doesn't keep the handle alive longer than the + # caller expects. + try: + stmt.close() + except _kernel.KernelError: + pass + + command_id = CommandId.from_sea_statement_id(executed.statement_id) + cursor.active_command_id = command_id + return self._make_result_set(executed, cursor, command_id) + + def cancel_command(self, command_id: CommandId) -> None: + with self._async_handles_lock: + handle = self._async_handles.get(command_id.guid) + if handle is None: + # Sync-execute paths fully materialise the result before + # ``execute_command`` returns, so by the time + # cancel_command can fire there's nothing in flight. + # Match the Thrift backend's tolerant behaviour. + logger.debug("cancel_command: no in-flight async handle for %s", command_id) + return + try: + handle.cancel() + except _kernel.KernelError as exc: + raise _reraise_kernel_error(exc) + + def close_command(self, command_id: CommandId) -> None: + with self._async_handles_lock: + handle = self._async_handles.pop(command_id.guid, None) + if handle is None: + logger.debug("close_command: no tracked handle for %s", command_id) + return + try: + handle.close() + except _kernel.KernelError as exc: + raise _reraise_kernel_error(exc) + + def get_query_state(self, command_id: CommandId) -> CommandState: + with self._async_handles_lock: + handle = self._async_handles.get(command_id.guid) + if handle is None: + # No tracked async handle means execute_command ran + # sync and the result was materialised before returning; + # the command is terminal by construction. + return CommandState.SUCCEEDED + try: + state, failure = handle.status() + except _kernel.KernelError as exc: + raise _reraise_kernel_error(exc) + if state == "Failed" and failure is not None: + # Surface server-reported failure as a database error so + # the cursor's polling loop terminates with the right + # exception class — matches the Thrift backend's + # behaviour on TOperationState::ERROR_STATE. + raise _reraise_kernel_error(failure) + return _STATE_TO_COMMAND_STATE.get(state, CommandState.FAILED) + + def get_execution_result( + self, + command_id: CommandId, + cursor: "Cursor", + ) -> "ResultSet": + with self._async_handles_lock: + handle = self._async_handles.get(command_id.guid) + if handle is None: + raise ProgrammingError( + "get_execution_result called for an unknown command_id; " + "the kernel backend only tracks async-submitted statements." + ) + try: + stream = handle.await_result() + except _kernel.KernelError as exc: + raise _reraise_kernel_error(exc) + return self._make_result_set(stream, cursor, command_id) + + # ── Metadata ─────────────────────────────────────────────────── + + def _make_result_set( + self, + kernel_handle: Any, + cursor: "Cursor", + command_id: CommandId, + ) -> "ResultSet": + """Build a ``KernelResultSet`` from any kernel handle. 
Used + by sync execute, ``get_execution_result``, and all metadata + paths to keep construction in one place.""" + return KernelResultSet( + connection=cursor.connection, + backend=self, + kernel_handle=kernel_handle, + command_id=command_id, + arraysize=cursor.arraysize, + buffer_size_bytes=cursor.buffer_size_bytes, + ) + + def _synthetic_command_id(self) -> CommandId: + """Metadata calls don't produce a server statement id; mint + a synthetic UUID so the ``ResultSet`` still has a stable + identifier the cursor can attribute logs to. + + Plain ``uuid.uuid4().hex`` (no prefix) — anything that + consumes ``cursor.query_id`` downstream (telemetry, log + ingestion) sees a UUID-shaped string rather than a + connector-internal magic prefix it cannot parse.""" + return CommandId.from_sea_statement_id(uuid.uuid4().hex) + + def get_catalogs( + self, + session_id: SessionId, + max_rows: int, + max_bytes: int, + cursor: "Cursor", + ) -> "ResultSet": + if self._kernel_session is None: + raise InterfaceError("get_catalogs requires an open session.") + try: + stream = self._kernel_session.metadata().list_catalogs() + except _kernel.KernelError as exc: + raise _reraise_kernel_error(exc) + return self._make_result_set(stream, cursor, self._synthetic_command_id()) + + def get_schemas( + self, + session_id: SessionId, + max_rows: int, + max_bytes: int, + cursor: "Cursor", + catalog_name: Optional[str] = None, + schema_name: Optional[str] = None, + ) -> "ResultSet": + if self._kernel_session is None: + raise InterfaceError("get_schemas requires an open session.") + try: + stream = self._kernel_session.metadata().list_schemas( + catalog=catalog_name, + schema_pattern=schema_name, + ) + except _kernel.KernelError as exc: + raise _reraise_kernel_error(exc) + return self._make_result_set(stream, cursor, self._synthetic_command_id()) + + def get_tables( + self, + session_id: SessionId, + max_rows: int, + max_bytes: int, + cursor: "Cursor", + catalog_name: Optional[str] = None, + schema_name: Optional[str] = None, + table_name: Optional[str] = None, + table_types: Optional[List[str]] = None, + ) -> "ResultSet": + if self._kernel_session is None: + raise InterfaceError("get_tables requires an open session.") + try: + stream = self._kernel_session.metadata().list_tables( + catalog=catalog_name, + schema_pattern=schema_name, + table_pattern=table_name, + table_types=table_types, + ) + except _kernel.KernelError as exc: + raise _reraise_kernel_error(exc) + if not table_types: + return self._make_result_set(stream, cursor, self._synthetic_command_id()) + # The kernel today returns the unfiltered ``SHOW TABLES`` shape + # regardless of ``table_types``. Drain to a single Arrow table + # and apply the same client-side filter the native SEA backend + # uses (column index 5 is TABLE_TYPE, case-sensitive). Cheap + # because metadata result sets are small. 
+ from databricks.sql.backend.sea.utils.filters import ResultSetFilter + + full_table = _drain_kernel_handle(stream) + filtered_table = ResultSetFilter._filter_arrow_table( + full_table, + column_name=full_table.schema.field(5).name, + allowed_values=table_types, + case_sensitive=True, + ) + return self._make_result_set( + _StaticArrowHandle(filtered_table), + cursor, + self._synthetic_command_id(), + ) + + def get_columns( + self, + session_id: SessionId, + max_rows: int, + max_bytes: int, + cursor: "Cursor", + catalog_name: Optional[str] = None, + schema_name: Optional[str] = None, + table_name: Optional[str] = None, + column_name: Optional[str] = None, + ) -> "ResultSet": + if self._kernel_session is None: + raise InterfaceError("get_columns requires an open session.") + if not catalog_name: + # Kernel's list_columns requires a catalog (SEA `SHOW + # COLUMNS` cannot span catalogs). Surface the constraint + # explicitly rather than letting the kernel error. + raise ProgrammingError( + "get_columns requires catalog_name on the kernel backend." + ) + try: + stream = self._kernel_session.metadata().list_columns( + catalog=catalog_name, + schema_pattern=schema_name, + table_pattern=table_name, + column_pattern=column_name, + ) + except _kernel.KernelError as exc: + raise _reraise_kernel_error(exc) + return self._make_result_set(stream, cursor, self._synthetic_command_id()) + + # ── Misc ─────────────────────────────────────────────────────── + + @property + def max_download_threads(self) -> int: + # CloudFetch parallelism lives kernel-side. This property is + # consulted by Thrift code paths that don't run for + # use_kernel=True; return a non-zero default so anything that + # peeks at it does not divide by zero. + return 10 + + +_STATE_TO_COMMAND_STATE: Dict[str, CommandState] = { + "Pending": CommandState.PENDING, + "Running": CommandState.RUNNING, + "Succeeded": CommandState.SUCCEEDED, + "Failed": CommandState.FAILED, + "Cancelled": CommandState.CANCELLED, + "Closed": CommandState.CLOSED, +} + + +def _drain_kernel_handle(handle: Any) -> Any: + """Drain a kernel ResultStream / ExecutedStatement into a single + ``pyarrow.Table``. Used by ``get_tables`` to apply a client-side + ``table_types`` filter on a metadata result; cheap because + metadata streams are small.""" + import pyarrow + + schema = handle.arrow_schema() + batches = [] + while True: + batch = handle.fetch_next_batch() + if batch is None: + break + if batch.num_rows > 0: + batches.append(batch) + try: + handle.close() + except _kernel.KernelError: + pass + return pyarrow.Table.from_batches(batches, schema=schema) + + +class _StaticArrowHandle: + """Duck-typed kernel handle that replays a pre-built + ``pyarrow.Table`` through ``arrow_schema()`` / + ``fetch_next_batch()`` / ``close()``. 
Used to wrap a + post-processed table (e.g., the ``table_types``-filtered output + of ``get_tables``) so it flows back through the normal + ``KernelResultSet`` path.""" + + def __init__(self, table: Any) -> None: + self._schema = table.schema + self._batches = list(table.to_batches()) + self._idx = 0 + + def arrow_schema(self) -> Any: + return self._schema + + def fetch_next_batch(self) -> Optional[Any]: + if self._idx >= len(self._batches): + return None + batch = self._batches[self._idx] + self._idx += 1 + return batch + + def close(self) -> None: + self._batches = [] diff --git a/src/databricks/sql/backend/kernel/result_set.py b/src/databricks/sql/backend/kernel/result_set.py new file mode 100644 index 000000000..2cc665656 --- /dev/null +++ b/src/databricks/sql/backend/kernel/result_set.py @@ -0,0 +1,247 @@ +"""Streaming ``ResultSet`` over a kernel ``ExecutedStatement`` or +``ResultStream``. + +The kernel surfaces two flavours of result-bearing handle: + +- ``ExecutedStatement`` — returned by ``Statement.execute()``. Has a + ``statement_id`` and a ``cancel()`` method. +- ``ResultStream`` — returned by ``Session.metadata().list_*`` and by + ``ExecutedAsyncStatement.await_result()``. No statement id; no + cancel. + +Both implement the same three methods this class actually calls: +``arrow_schema() / fetch_next_batch() / close()``. ``KernelResultSet`` +takes either via the ``kernel_handle`` parameter and treats them +uniformly — the connector's ``ResultSet`` contract doesn't need to +distinguish them. + +Buffer shape mirrors the prior ADBC POC's ``AdbcResultSet``: a FIFO +of pyarrow ``RecordBatch``es, fed one batch at a time from the +kernel as the connector calls ``fetch*``. ``fetchmany(n)`` slices +within a batch when ``n`` is smaller than the kernel's natural +batch size; ``fetchall`` drains the whole stream. + +Note: ``buffer_size_bytes`` is accepted by the constructor for +contract compatibility with the base ``ResultSet`` but is not +consulted — the kernel backend currently caps buffering by rows +pulled, not bytes. Memory ceilings should be controlled by the +kernel-side batch sizing. +""" + +from __future__ import annotations + +import logging +from collections import deque +from typing import Any, Deque, List, Optional, TYPE_CHECKING + +import pyarrow + +from databricks.sql.backend.kernel.type_mapping import description_from_arrow_schema +from databricks.sql.backend.types import CommandId, CommandState +from databricks.sql.result_set import ResultSet +from databricks.sql.types import Row + +if TYPE_CHECKING: + from databricks.sql.client import Connection + from databricks.sql.backend.kernel.client import KernelDatabricksClient + +logger = logging.getLogger(__name__) + + +class KernelResultSet(ResultSet): + """Streaming ``ResultSet`` over a kernel handle. + + The ``kernel_handle`` is duck-typed: it must implement + ``arrow_schema() -> pyarrow.Schema``, ``fetch_next_batch() -> + Optional[pyarrow.RecordBatch]``, and ``close() -> None``. + Both ``databricks_sql_kernel.ExecutedStatement`` and + ``databricks_sql_kernel.ResultStream`` satisfy that contract. 
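+
+    Typical consumption (a sketch — ``handle`` is any object meeting the
+    contract above; the sizing hints come from the cursor, exactly as
+    ``KernelDatabricksClient._make_result_set`` passes them)::
+
+        rs = KernelResultSet(
+            connection=cursor.connection,
+            backend=backend,
+            kernel_handle=handle,
+            command_id=command_id,
+            arraysize=cursor.arraysize,
+            buffer_size_bytes=cursor.buffer_size_bytes,
+        )
+        first_rows = rs.fetchmany(100)   # slices within a kernel batch
+        remainder = rs.fetchall_arrow()  # drains the rest of the stream
+        rs.close()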
+ """ + + def __init__( + self, + connection: "Connection", + backend: "KernelDatabricksClient", + kernel_handle: Any, + command_id: CommandId, + arraysize: int, + buffer_size_bytes: int, + ): + schema = kernel_handle.arrow_schema() + super().__init__( + connection=connection, + backend=backend, + arraysize=arraysize, + buffer_size_bytes=buffer_size_bytes, + command_id=command_id, + status=CommandState.RUNNING, + has_been_closed_server_side=False, + has_more_rows=True, + results_queue=None, + description=description_from_arrow_schema(schema), + is_staging_operation=False, + lz4_compressed=False, + arrow_schema_bytes=None, + ) + self._kernel_handle = kernel_handle + self._schema: pyarrow.Schema = schema + # FIFO of record batches plus a per-head row offset, so + # partial fetches (fetchmany(n) for n < batch_size) don't + # re-fetch from the kernel. + self._buffer: Deque[pyarrow.RecordBatch] = deque() + self._buffer_offset: int = 0 + # Running count of rows currently buffered (sum of batch + # sizes minus the head-batch offset). Maintained by + # _pull_one_batch / _take_buffered / _drain so _buffered_rows + # stays O(1) instead of walking the deque. + self._buffered_count: int = 0 + self._exhausted: bool = False + + # ----- internal helpers ----- + + def _pull_one_batch(self) -> bool: + """Pull the next batch from the kernel into the local buffer. + Returns True if a batch was added; False if the kernel side + is exhausted.""" + if self._exhausted: + return False + batch = self._kernel_handle.fetch_next_batch() + if batch is None: + self._exhausted = True + self.has_more_rows = False + self.status = CommandState.SUCCEEDED + return False + if batch.num_rows > 0: + self._buffer.append(batch) + self._buffered_count += batch.num_rows + return True + + def _ensure_buffered(self, n_rows: int) -> int: + """Pull batches until ``n_rows`` are buffered or the kernel + is exhausted. 
Returns total rows currently buffered.""" + while self._buffered_count < n_rows: + if not self._pull_one_batch(): + break + return self._buffered_count + + def _buffered_rows(self) -> int: + return self._buffered_count + + def _take_buffered(self, n: int) -> pyarrow.Table: + """Slice up to ``n`` rows out of the buffer; advances state.""" + slices: List[pyarrow.RecordBatch] = [] + remaining = n + while remaining > 0 and self._buffer: + head = self._buffer[0] + avail = head.num_rows - self._buffer_offset + take = min(avail, remaining) + slices.append(head.slice(self._buffer_offset, take)) + self._buffer_offset += take + remaining -= take + if self._buffer_offset >= head.num_rows: + self._buffer.popleft() + self._buffer_offset = 0 + taken = n - remaining + self._buffered_count -= taken + self._next_row_index += taken + if not slices: + return pyarrow.Table.from_batches([], schema=self._schema) + return pyarrow.Table.from_batches(slices, schema=self._schema) + + def _drain(self) -> pyarrow.Table: + """Consume everything left in the buffer + kernel stream + and return as a single Table.""" + chunks: List[pyarrow.RecordBatch] = [] + if self._buffer and self._buffer_offset > 0: + head = self._buffer.popleft() + chunks.append( + head.slice(self._buffer_offset, head.num_rows - self._buffer_offset) + ) + self._buffer_offset = 0 + while self._buffer: + chunks.append(self._buffer.popleft()) + if not self._exhausted: + while True: + batch = self._kernel_handle.fetch_next_batch() + if batch is None: + self._exhausted = True + self.has_more_rows = False + self.status = CommandState.SUCCEEDED + break + if batch.num_rows > 0: + chunks.append(batch) + rows = sum(c.num_rows for c in chunks) + self._buffered_count = 0 + self._next_row_index += rows + if not chunks: + return pyarrow.Table.from_batches([], schema=self._schema) + return pyarrow.Table.from_batches(chunks, schema=self._schema) + + # ----- Arrow fetches ----- + + def fetchall_arrow(self) -> pyarrow.Table: + return self._drain() + + def fetchmany_arrow(self, size: int) -> pyarrow.Table: + if size < 0: + raise ValueError(f"fetchmany_arrow size must be >= 0, got {size}") + if size == 0: + return pyarrow.Table.from_batches([], schema=self._schema) + self._ensure_buffered(size) + return self._take_buffered(size) + + # ----- Row fetches ----- + + def fetchone(self) -> Optional[Row]: + self._ensure_buffered(1) + if self._buffered_rows() == 0: + return None + table = self._take_buffered(1) + rows = self._convert_arrow_table(table) + return rows[0] if rows else None + + def fetchmany(self, size: int) -> List[Row]: + if size < 0: + raise ValueError(f"fetchmany size must be >= 0, got {size}") + if size == 0: + return [] + self._ensure_buffered(size) + table = self._take_buffered(size) + return self._convert_arrow_table(table) + + def fetchall(self) -> List[Row]: + return self._convert_arrow_table(self._drain()) + + def close(self) -> None: + """Close the underlying kernel handle. Idempotent — the + kernel's own ``close()`` is idempotent, and we guard against + repeated calls so partially-drained streams don't double- + decrement reference counts.""" + if self._kernel_handle is None: + return + try: + self._kernel_handle.close() + except Exception as exc: + # close() failures are not actionable at the connector + # level; log and swallow so the cursor's __del__ / + # connection close path stays clean. 
+ logger.warning("Error closing kernel handle: %s", exc) + # Drop the entry from the backend's async-handle map (if + # present) — for async-submitted statements the handle is + # tracked there and the base ``ResultSet.close`` path would + # otherwise leave a stale entry pointing at a closed handle. + # No-op for the sync-execute and metadata paths, which never + # register in ``_async_handles``. + guid = getattr(self.command_id, "guid", None) + if guid is not None: + self.backend._async_handles_lock.acquire() + try: + self.backend._async_handles.pop(guid, None) + finally: + self.backend._async_handles_lock.release() + self._buffer.clear() + self._buffered_count = 0 + self._kernel_handle = None + self._exhausted = True + self.has_been_closed_server_side = True + self.status = CommandState.CLOSED diff --git a/src/databricks/sql/backend/kernel/type_mapping.py b/src/databricks/sql/backend/kernel/type_mapping.py new file mode 100644 index 000000000..a91160d17 --- /dev/null +++ b/src/databricks/sql/backend/kernel/type_mapping.py @@ -0,0 +1,79 @@ +"""Arrow ↔ PEP 249 type translation for the kernel backend. + +The kernel returns results as pyarrow ``Schema`` / ``RecordBatch``; +PEP 249 ``cursor.description`` is a list of 7-tuples with a +type-name string per column. ``description_from_arrow_schema`` +flattens the conversion so ``KernelResultSet`` and any future +kernel-result wrapper share the same mapping. + +Parameter binding (``TSparkParameter`` → kernel ``TypedValue``) is +not yet implemented — the PyO3 ``Statement`` doesn't expose a +``bind_param`` method on this branch. It'll land in a follow-up +once that PyO3 surface ships. +""" + +from __future__ import annotations + +from typing import List, Tuple + +import pyarrow + + +def _arrow_type_to_dbapi_string(arrow_type: pyarrow.DataType) -> str: + """Map a pyarrow type to the Databricks SQL type name used in + PEP 249 ``description``. Names match what the Thrift backend + produces so consumers can branch on them identically. + """ + if pyarrow.types.is_boolean(arrow_type): + return "boolean" + if pyarrow.types.is_int8(arrow_type): + return "tinyint" + if pyarrow.types.is_int16(arrow_type): + return "smallint" + if pyarrow.types.is_int32(arrow_type): + return "int" + if pyarrow.types.is_int64(arrow_type): + return "bigint" + if pyarrow.types.is_float32(arrow_type): + return "float" + if pyarrow.types.is_float64(arrow_type): + return "double" + if pyarrow.types.is_decimal(arrow_type): + return "decimal" + if pyarrow.types.is_string(arrow_type) or pyarrow.types.is_large_string(arrow_type): + return "string" + if pyarrow.types.is_binary(arrow_type) or pyarrow.types.is_large_binary(arrow_type): + return "binary" + if pyarrow.types.is_date(arrow_type): + return "date" + if pyarrow.types.is_timestamp(arrow_type): + return "timestamp" + if pyarrow.types.is_list(arrow_type) or pyarrow.types.is_large_list(arrow_type): + return "array" + if pyarrow.types.is_struct(arrow_type): + return "struct" + if pyarrow.types.is_map(arrow_type): + return "map" + return str(arrow_type) + + +def description_from_arrow_schema(schema: pyarrow.Schema) -> List[Tuple]: + """Build a PEP 249 ``description`` list from a pyarrow Schema. + + Each tuple is ``(name, type_code, display_size, internal_size, + precision, scale, null_ok)``. The kernel does not report the + last five so they're all ``None`` — same shape the existing + ADBC / Thrift result paths produce. 
+ """ + return [ + ( + field.name, + _arrow_type_to_dbapi_string(field.type), + None, + None, + None, + None, + None, + ) + for field in schema + ] diff --git a/src/databricks/sql/client.py b/src/databricks/sql/client.py index fe52f0c79..e3c25fe65 100755 --- a/src/databricks/sql/client.py +++ b/src/databricks/sql/client.py @@ -115,7 +115,17 @@ def __init__( Parameters: :param use_sea: `bool`, optional (default is False) - Use the SEA backend instead of the Thrift backend. + Use the native pure-Python SEA backend instead of + the Thrift backend. + :param use_kernel: `bool`, optional (default is False) + Route the connection through the Rust kernel + (``databricks-sql-kernel`` via PyO3). Requires the + kernel wheel to be installed separately + (``pip install databricks-sql-kernel``); raises + ImportError otherwise. In active development — + PAT auth only today; OAuth / federation / external + credentials and native parameter binding land in + follow-ups. Mutually exclusive with ``use_sea``. :param use_hybrid_disposition: `bool`, optional (default is False) Use the hybrid disposition instead of the inline disposition. :param server_hostname: Databricks instance host name. @@ -1575,6 +1585,12 @@ def columns( Get columns corresponding to the catalog_name, schema_name, table_name and column_name. Names can contain % wildcards. + + Note: on ``use_kernel=True``, ``catalog_name`` is required — + the kernel's underlying ``SHOW COLUMNS`` cannot span catalogs. + Passing ``catalog_name=None`` raises ``ProgrammingError``. The + Thrift and native SEA backends accept ``catalog_name=None``. + :returns self """ self._check_not_closed() diff --git a/src/databricks/sql/session.py b/src/databricks/sql/session.py index 65c0d6aca..97790e4d9 100644 --- a/src/databricks/sql/session.py +++ b/src/databricks/sql/session.py @@ -122,6 +122,31 @@ def _create_backend( ) -> DatabricksClient: """Create and return the appropriate backend client.""" self.use_sea = kwargs.get("use_sea", False) + self.use_kernel = kwargs.get("use_kernel", False) + + if self.use_kernel and self.use_sea: + raise ValueError( + "use_kernel and use_sea are mutually exclusive — pick one." + ) + + if self.use_kernel: + # Lazy import so the connector doesn't ImportError at + # startup when the kernel wheel isn't installed — the + # error surfaces only when a caller actually requests + # use_kernel=True. + from databricks.sql.backend.kernel.client import KernelDatabricksClient + + logger.debug("Creating kernel-backed client for use_kernel=True") + return KernelDatabricksClient( + server_hostname=server_hostname, + http_path=http_path, + http_headers=all_headers, + auth_provider=auth_provider, + ssl_options=self.ssl_options, + http_client=self.http_client, + catalog=kwargs.get("catalog"), + schema=kwargs.get("schema"), + ) databricks_client_class: Type[DatabricksClient] if self.use_sea: diff --git a/tests/e2e/test_kernel_backend.py b/tests/e2e/test_kernel_backend.py new file mode 100644 index 000000000..0c0722b91 --- /dev/null +++ b/tests/e2e/test_kernel_backend.py @@ -0,0 +1,188 @@ +"""E2E tests for ``use_kernel=True`` (routes through the Rust kernel +via the PyO3 ``databricks_sql_kernel`` module). + +PAT auth only. Anything else surfaces as ``NotSupportedError`` +from the auth bridge — covered as a unit test, not exercised here. + +Skipped automatically when: + - The standard ``DATABRICKS_SERVER_HOSTNAME`` / ``HTTP_PATH`` / + ``TOKEN`` creds aren't set (existing connector convention). 
+ - ``databricks_sql_kernel`` isn't importable (the wheel hasn't + been installed; run ``pip install databricks-sql-kernel`` or, + for local dev, + ``cd databricks-sql-kernel/pyo3 && maturin develop --release`` + into this venv). + +Run from the connector repo root: + + set -a && source ~/.databricks/pecotesting-creds && set +a + .venv/bin/pytest tests/e2e/test_kernel_backend.py -v +""" + +from __future__ import annotations + +import pytest + +import databricks.sql as sql +from databricks.sql.exc import DatabaseError + + +# Skip the whole module unless the kernel wheel is importable. +pytest.importorskip( + "databricks_sql_kernel", + reason="use_kernel=True requires the databricks-sql-kernel package", +) + + +@pytest.fixture(scope="module") +def kernel_conn_params(connection_details): + """Live-cred check + connection params for use_kernel=True. + + Skips the module if any cred is missing rather than letting + every test fail with a confusing connect-time error. + """ + host = connection_details.get("host") + http_path = connection_details.get("http_path") + token = connection_details.get("access_token") + if not (host and http_path and token): + pytest.skip( + "DATABRICKS_SERVER_HOSTNAME / DATABRICKS_HTTP_PATH / " + "DATABRICKS_TOKEN not set" + ) + return { + "server_hostname": host, + "http_path": http_path, + "access_token": token, + "use_kernel": True, + } + + +@pytest.fixture +def conn(kernel_conn_params): + """One-shot connection per test (the simple_test pattern the + existing e2e suite uses for cursor-level tests).""" + c = sql.connect(**kernel_conn_params) + try: + yield c + finally: + c.close() + + +def test_connect_with_use_kernel_opens_a_session(conn): + assert conn.open, "connection should report open after connect()" + + +def test_select_one(conn): + with conn.cursor() as cur: + cur.execute("SELECT 1 AS n") + assert cur.description[0][0] == "n" + # description type slug matches what Thrift produces + assert cur.description[0][1] == "int" + rows = cur.fetchall() + assert len(rows) == 1 + assert rows[0][0] == 1 + + +def test_drain_large_range_to_arrow(conn): + """SELECT * FROM range(10000) drains as a pyarrow Table with + 10000 rows. 
Exercises end-of-stream drain over multiple + ``fetch_next_batch`` calls; not large enough to cross a + CloudFetch chunk boundary — see test_driver for CloudFetch + coverage.""" + with conn.cursor() as cur: + cur.execute("SELECT * FROM range(10000)") + rows = cur.fetchall() + assert len(rows) == 10000 + + +def test_fetchmany_pacing(conn): + """fetchmany honours the requested size and stops cleanly at + end-of-stream — covers the buffer-slicing logic in + KernelResultSet.""" + with conn.cursor() as cur: + cur.execute("SELECT * FROM range(50)") + r1 = cur.fetchmany(10) + r2 = cur.fetchmany(20) + r3 = cur.fetchmany(100) # capped at remaining + assert (len(r1), len(r2), len(r3)) == (10, 20, 20) + + +def test_fetchall_arrow(conn): + with conn.cursor() as cur: + cur.execute("SELECT 1 AS a, 'hi' AS b") + table = cur.fetchall_arrow() + assert table.num_rows == 1 + assert table.column_names == ["a", "b"] + + +# ── Metadata ────────────────────────────────────────────────────── + + +def test_metadata_catalogs(conn): + with conn.cursor() as cur: + cur.catalogs() + rows = cur.fetchall() + assert len(rows) > 0 + + +def test_metadata_schemas(conn): + with conn.cursor() as cur: + cur.schemas(catalog_name="main") + rows = cur.fetchall() + assert len(rows) > 0 + + +def test_metadata_tables(conn): + with conn.cursor() as cur: + cur.tables(catalog_name="system", schema_name="information_schema") + rows = cur.fetchall() + assert len(rows) > 0 + + +def test_metadata_columns(conn): + with conn.cursor() as cur: + cur.columns( + catalog_name="system", + schema_name="information_schema", + table_name="tables", + ) + rows = cur.fetchall() + assert len(rows) > 0 + + +# ── Session configuration ───────────────────────────────────────── + + +def test_session_configuration_round_trips(kernel_conn_params): + """`session_configuration` flows through to the kernel's + `session_conf` and is honoured by the server. + + `ANSI_MODE` is the safe choice — it's on the SEA allow-list and + isn't workspace-policy-clamped (unlike `STATEMENT_TIMEOUT`) or + rejected by the warehouse (unlike `TIMEZONE` on dogfood).""" + params = dict(kernel_conn_params) + params["session_configuration"] = {"ANSI_MODE": "false"} + with sql.connect(**params) as c: + with c.cursor() as cur: + cur.execute("SET ANSI_MODE") + rows = cur.fetchall() + kv = {r[0]: r[1] for r in rows} + assert kv.get("ANSI_MODE") == "false", f"got {rows!r}" + + +# ── Error mapping ───────────────────────────────────────────────── + + +def test_bad_sql_surfaces_as_databaseerror(conn): + """Bad SQL should surface as a PEP 249 ``DatabaseError`` with + the kernel's structured fields (`code`, `sql_state`, `query_id`) + attached as attributes — the connector backend re-raises the + kernel's ``SqlError`` to ``DatabaseError`` while preserving the + server-reported state.""" + with conn.cursor() as cur: + with pytest.raises(DatabaseError) as exc_info: + cur.execute("SELECT * FROM definitely_not_a_table_xyz_kernel_e2e") + err = exc_info.value + # Structured fields copied off the kernel exception: + assert getattr(err, "code", None) == "SqlError" + assert getattr(err, "sql_state", None) == "42P01" diff --git a/tests/unit/test_kernel_auth_bridge.py b/tests/unit/test_kernel_auth_bridge.py new file mode 100644 index 000000000..a5e2e756b --- /dev/null +++ b/tests/unit/test_kernel_auth_bridge.py @@ -0,0 +1,137 @@ +"""Unit tests for the kernel backend's auth bridge. + +Phase 1 ships PAT only. Tests verify: + - PAT routes through ``auth_type='pat'``. 
+ - ``TokenFederationProvider``-wrapped PAT also routes through + PAT (every provider built by ``get_python_sql_connector_auth_provider`` + is federation-wrapped, so the naive isinstance check has to + look through the wrapper). + - Anything else raises ``NotSupportedError`` with a clear message. +""" + +from __future__ import annotations + +from unittest.mock import Mock + +import pytest + +# auth_bridge.py itself has no pyarrow or kernel-wheel deps. The +# `databricks.sql.backend.kernel` package's __init__.py deliberately +# does *not* eagerly re-export from .client either (which would +# require the kernel wheel). So this test can run on the +# default-deps CI matrix without any extras. No importorskip needed. + +from databricks.sql.auth.authenticators import ( + AccessTokenAuthProvider, + AuthProvider, + DatabricksOAuthProvider, + ExternalAuthProvider, +) +from databricks.sql.backend.kernel.auth_bridge import ( + _extract_bearer_token, + kernel_auth_kwargs, +) +from databricks.sql.exc import NotSupportedError + + +class _FakeOAuthProvider(AuthProvider): + """Stand-in for any non-PAT provider. The bridge should reject + these with NotSupportedError.""" + + def add_headers(self, request_headers): + request_headers["Authorization"] = "Bearer oauth-token-xyz" + + +class _MalformedProvider(AuthProvider): + """Provider that returns a non-Bearer Authorization header.""" + + def add_headers(self, request_headers): + request_headers["Authorization"] = "Basic dXNlcjpwYXNz" + + +class _SilentProvider(AuthProvider): + """Provider that writes nothing — misconfigured auth.""" + + def add_headers(self, request_headers): + pass + + +class TestExtractBearerToken: + def test_pat_provider_returns_token(self): + p = AccessTokenAuthProvider("dapi-abc-123") + assert _extract_bearer_token(p) == "dapi-abc-123" + + def test_non_bearer_auth_returns_none(self): + assert _extract_bearer_token(_MalformedProvider()) is None + + def test_silent_provider_returns_none(self): + assert _extract_bearer_token(_SilentProvider()) is None + + +class TestKernelAuthKwargs: + def test_pat_routes_to_kernel_pat(self): + kwargs = kernel_auth_kwargs(AccessTokenAuthProvider("dapi-xyz")) + assert kwargs == {"auth_type": "pat", "access_token": "dapi-xyz"} + + def test_federation_wrapped_pat_routes_to_kernel_pat(self): + """``get_python_sql_connector_auth_provider`` always wraps + the base provider in a ``TokenFederationProvider``, so the + PAT case never reaches us unwrapped in practice. The bridge + must look through the federation wrapper to find the + underlying ``AccessTokenAuthProvider``. + + Construct a real ``TokenFederationProvider`` (with a mock + http_client — `_exchange_token` never fires for a plain + ``dapi-…`` PAT because it isn't a JWT, so the mock is never + called). This exercises the real ``add_headers`` path the + bridge sees in production. 
+ """ + from databricks.sql.auth.token_federation import TokenFederationProvider + + base = AccessTokenAuthProvider("dapi-abc") + federated = TokenFederationProvider( + hostname="https://example.cloud.databricks.com", + external_provider=base, + http_client=Mock(), + ) + kwargs = kernel_auth_kwargs(federated) + assert kwargs == {"auth_type": "pat", "access_token": "dapi-abc"} + + def test_pat_with_silent_provider_raises_value_error(self): + """An AccessTokenAuthProvider that produces no Authorization + header is misconfigured; surface that at bridge-build time, + not on the first kernel HTTP request.""" + broken = AccessTokenAuthProvider("dapi-x") + broken.add_headers = lambda h: None # type: ignore[method-assign] + with pytest.raises(ValueError, match="Bearer"): + kernel_auth_kwargs(broken) + + def test_generic_oauth_provider_raises_not_supported(self): + with pytest.raises(NotSupportedError, match="only supports PAT"): + kernel_auth_kwargs(_FakeOAuthProvider()) + + def test_external_credentials_provider_raises_not_supported(self): + """``ExternalAuthProvider`` wraps user-supplied + credentials_provider — kernel doesn't accept these today, + and the bridge surfaces that explicitly.""" + # ExternalAuthProvider's __init__ calls the credentials + # provider; supply a noop one. + from databricks.sql.auth.authenticators import CredentialsProvider + + class _NoopCreds(CredentialsProvider): + def auth_type(self): + return "noop" + + def __call__(self, *args, **kwargs): + return lambda: {"Authorization": "Bearer noop"} + + ext = ExternalAuthProvider(_NoopCreds()) + with pytest.raises(NotSupportedError, match="only supports PAT"): + kernel_auth_kwargs(ext) + + def test_silent_non_pat_provider_also_raises_not_supported(self): + """Even if a non-PAT provider produces no header, the bridge + rejects the type itself — we don't try to extract a token + from something we already know is unsupported.""" + with pytest.raises(NotSupportedError): + kernel_auth_kwargs(_SilentProvider()) diff --git a/tests/unit/test_kernel_client.py b/tests/unit/test_kernel_client.py new file mode 100644 index 000000000..b23365c6e --- /dev/null +++ b/tests/unit/test_kernel_client.py @@ -0,0 +1,397 @@ +"""Unit tests for ``KernelDatabricksClient`` — the error mapping, +state-mapping, async-handle bookkeeping, and method-level guards +that don't require a live kernel session. + +The connector's ``databricks.sql.backend.kernel.client`` module +imports the ``databricks_sql_kernel`` extension at import time, so +this test installs a fake module into ``sys.modules`` *before* +importing the client. The fake exposes the minimum surface the +client touches (``Session``, ``KernelError``, ``Statement``, +``ExecutedStatement``, ``ExecutedAsyncStatement``, ``ResultStream``, +``metadata``). +""" + +from __future__ import annotations + +import sys +import types +from typing import Optional +from unittest.mock import MagicMock + +import pytest + +# pyarrow is an optional dep; the kernel client's result_set imports +# it eagerly, so the whole module must skip when pyarrow is missing. +pa = pytest.importorskip("pyarrow") + + +# --------------------------------------------------------------------------- +# Fake databricks_sql_kernel module — installed before client.py imports. +# --------------------------------------------------------------------------- + + +class _FakeKernelError(Exception): + """Stand-in for ``databricks_sql_kernel.KernelError``. 
Carries + the structured attrs the connector forwards onto the re-raised + PEP 249 exception.""" + + def __init__( + self, + code: str = "Unknown", + message: str = "boom", + sql_state: Optional[str] = None, + query_id: Optional[str] = None, + ) -> None: + super().__init__(message) + self.code = code + self.message = message + self.sql_state = sql_state + self.error_code = None + self.vendor_code = None + self.http_status = None + self.retryable = False + self.query_id = query_id + + +_fake_kernel_module = types.ModuleType("databricks_sql_kernel") +_fake_kernel_module.KernelError = _FakeKernelError # type: ignore[attr-defined] +_fake_kernel_module.Session = MagicMock() # type: ignore[attr-defined] +sys.modules.setdefault("databricks_sql_kernel", _fake_kernel_module) + + +# Importing the client now picks up the fake module via +# ``import databricks_sql_kernel as _kernel`` at the top of client.py. +from databricks.sql.auth.authenticators import AccessTokenAuthProvider +from databricks.sql.backend.kernel import client as kernel_client +from databricks.sql.backend.types import CommandId, CommandState +from databricks.sql.exc import ( + DatabaseError, + InterfaceError, + NotSupportedError, + OperationalError, + ProgrammingError, +) + + +# --------------------------------------------------------------------------- +# Error mapping +# --------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "code, expected_cls", + [ + ("InvalidArgument", ProgrammingError), + ("Unauthenticated", OperationalError), + ("PermissionDenied", OperationalError), + ("NotFound", ProgrammingError), + ("ResourceExhausted", OperationalError), + ("Unavailable", OperationalError), + ("Timeout", OperationalError), + ("Cancelled", OperationalError), + ("DataLoss", DatabaseError), + ("Internal", DatabaseError), + ("InvalidStatementHandle", ProgrammingError), + ("NetworkError", OperationalError), + ("SqlError", DatabaseError), + ("Unknown", DatabaseError), + ], +) +def test_code_to_exception_mapping(code, expected_cls): + """Every entry in ``_CODE_TO_EXCEPTION`` maps to the documented + PEP 249 class.""" + err = _FakeKernelError(code=code, message=f"{code} boom") + out = kernel_client._reraise_kernel_error(err) + assert isinstance(out, expected_cls) + assert "boom" in str(out) + assert out.__cause__ is err + + +def test_unknown_code_falls_back_to_database_error(): + err = _FakeKernelError(code="SomethingNew", message="…") + out = kernel_client._reraise_kernel_error(err) + assert isinstance(out, DatabaseError) + + +def test_reraise_forwards_structured_attributes(): + err = _FakeKernelError( + code="SqlError", + message="table not found", + sql_state="42P01", + query_id="q-123", + ) + out = kernel_client._reraise_kernel_error(err) + assert out.code == "SqlError" + assert out.sql_state == "42P01" + assert out.query_id == "q-123" + # Optional fields default to None on the source exception and + # come through verbatim on the re-raised side. 
+ for attr in ("error_code", "vendor_code", "http_status"): + assert getattr(out, attr) is None + assert out.retryable is False + + +# --------------------------------------------------------------------------- +# State mapping +# --------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "kernel_state, expected", + [ + ("Pending", CommandState.PENDING), + ("Running", CommandState.RUNNING), + ("Succeeded", CommandState.SUCCEEDED), + ("Failed", CommandState.FAILED), + ("Cancelled", CommandState.CANCELLED), + ("Closed", CommandState.CLOSED), + ], +) +def test_state_to_command_state_mapping(kernel_state, expected): + assert kernel_client._STATE_TO_COMMAND_STATE[kernel_state] == expected + + +# --------------------------------------------------------------------------- +# Client lifecycle / guards (no live session) +# --------------------------------------------------------------------------- + + +def _make_client() -> kernel_client.KernelDatabricksClient: + """Build a client with a PAT auth provider; the kernel ``Session`` + isn't opened until ``open_session`` runs.""" + return kernel_client.KernelDatabricksClient( + server_hostname="example.cloud.databricks.com", + http_path="/sql/1.0/warehouses/abc", + auth_provider=AccessTokenAuthProvider("dapi-test"), + ssl_options=None, + ) + + +def test_no_open_session_guards_raise_interface_error(): + """Every method that depends on an open kernel session must + raise ``InterfaceError`` before any kernel call.""" + c = _make_client() + cursor = MagicMock() + cursor.arraysize = 100 + cursor.buffer_size_bytes = 1024 + + with pytest.raises(InterfaceError, match="open session"): + c.execute_command( + operation="SELECT 1", + session_id=MagicMock(), + max_rows=1, + max_bytes=1, + lz4_compression=False, + cursor=cursor, + use_cloud_fetch=False, + parameters=[], + async_op=False, + enforce_embedded_schema_correctness=False, + ) + + for method, kwargs in [ + ("get_catalogs", {}), + ("get_schemas", {}), + ("get_tables", {}), + ("get_columns", {"catalog_name": "main"}), + ]: + with pytest.raises(InterfaceError): + getattr(c, method)( + session_id=MagicMock(), + max_rows=1, + max_bytes=1, + cursor=cursor, + **kwargs, + ) + + +def test_open_session_rejects_double_open(monkeypatch): + """Two ``open_session`` calls on the same client must fail — + the kernel session is bound to a single open call.""" + c = _make_client() + c._kernel_session = MagicMock() # pretend already open + with pytest.raises(InterfaceError, match="already has an open session"): + c.open_session(session_configuration=None, catalog=None, schema=None) + + +def test_execute_command_rejects_parameters(): + c = _make_client() + c._kernel_session = MagicMock() + cursor = MagicMock() + cursor.arraysize = 100 + cursor.buffer_size_bytes = 1024 + with pytest.raises(NotSupportedError, match="Parameter binding"): + c.execute_command( + operation="SELECT ?", + session_id=MagicMock(), + max_rows=1, + max_bytes=1, + lz4_compression=False, + cursor=cursor, + use_cloud_fetch=False, + parameters=[object()], # any non-empty list + async_op=False, + enforce_embedded_schema_correctness=False, + ) + + +def test_execute_command_rejects_query_tags(): + c = _make_client() + c._kernel_session = MagicMock() + cursor = MagicMock() + cursor.arraysize = 100 + cursor.buffer_size_bytes = 1024 + with pytest.raises(NotSupportedError, match="query_tags"): + c.execute_command( + operation="SELECT 1", + session_id=MagicMock(), + max_rows=1, + max_bytes=1, + lz4_compression=False, + 
cursor=cursor, + use_cloud_fetch=False, + parameters=[], + async_op=False, + enforce_embedded_schema_correctness=False, + query_tags={"team": "x"}, + ) + + +def test_get_columns_requires_catalog(): + c = _make_client() + c._kernel_session = MagicMock() + cursor = MagicMock() + cursor.arraysize = 100 + cursor.buffer_size_bytes = 1024 + with pytest.raises(ProgrammingError, match="catalog_name"): + c.get_columns( + session_id=MagicMock(), + max_rows=1, + max_bytes=1, + cursor=cursor, + catalog_name=None, + ) + + +# --------------------------------------------------------------------------- +# Async handle bookkeeping +# --------------------------------------------------------------------------- + + +def test_cancel_command_tolerant_when_handle_missing(): + """``cancel_command`` is documented to be a no-op when there's + no tracked async handle (matches Thrift's tolerance).""" + c = _make_client() + fake_command_id = CommandId.from_sea_statement_id("not-tracked") + c.cancel_command(fake_command_id) # must not raise + + +def test_close_command_tolerant_when_handle_missing(): + c = _make_client() + fake_command_id = CommandId.from_sea_statement_id("not-tracked") + c.close_command(fake_command_id) # must not raise + + +def test_get_query_state_returns_succeeded_when_handle_missing(): + """Sync-execute paths never register an async handle; by the + time ``get_query_state`` could be called the command is + terminal-by-construction. The client returns SUCCEEDED so the + cursor's polling loop terminates cleanly.""" + c = _make_client() + fake_command_id = CommandId.from_sea_statement_id("sync-only") + assert c.get_query_state(fake_command_id) == CommandState.SUCCEEDED + + +def test_get_execution_result_raises_for_unknown_command_id(): + """The kernel backend only tracks async-submitted statements; + a ``get_execution_result`` call for an unknown id is a + programming error.""" + c = _make_client() + fake_command_id = CommandId.from_sea_statement_id("unknown") + with pytest.raises(ProgrammingError, match="unknown command_id"): + c.get_execution_result(fake_command_id, cursor=MagicMock()) + + +def test_cancel_command_reraises_kernel_error(): + c = _make_client() + fake_handle = MagicMock() + fake_handle.cancel.side_effect = _FakeKernelError(code="Unavailable") + cid = CommandId.from_sea_statement_id("abc") + c._async_handles[cid.guid] = fake_handle + with pytest.raises(OperationalError): + c.cancel_command(cid) + + +def test_close_command_reraises_kernel_error(): + c = _make_client() + fake_handle = MagicMock() + fake_handle.close.side_effect = _FakeKernelError(code="Internal") + cid = CommandId.from_sea_statement_id("abc") + c._async_handles[cid.guid] = fake_handle + with pytest.raises(DatabaseError): + c.close_command(cid) + # The handle is popped before the kernel call, so a subsequent + # close_command is tolerantly a no-op. 
+ c.close_command(cid) + + +def test_get_query_state_raises_on_failed_state_with_failure(): + c = _make_client() + fake_handle = MagicMock() + fake_handle.status.return_value = ( + "Failed", + _FakeKernelError(code="SqlError", message="bad"), + ) + cid = CommandId.from_sea_statement_id("abc") + c._async_handles[cid.guid] = fake_handle + with pytest.raises(DatabaseError, match="bad"): + c.get_query_state(cid) + + +def test_get_query_state_returns_state_when_no_failure(): + c = _make_client() + fake_handle = MagicMock() + fake_handle.status.return_value = ("Running", None) + cid = CommandId.from_sea_statement_id("abc") + c._async_handles[cid.guid] = fake_handle + assert c.get_query_state(cid) == CommandState.RUNNING + + +# --------------------------------------------------------------------------- +# Misc +# --------------------------------------------------------------------------- + + +def test_max_download_threads_is_nonzero(): + """Property is consulted by Thrift code paths that don't run for + ``use_kernel=True``; a non-zero default avoids divide-by-zero.""" + c = _make_client() + assert c.max_download_threads > 0 + + +def test_synthetic_command_id_is_uuid_shaped(): + """Synthetic metadata command IDs are plain hex UUIDs (no + ``metadata-`` prefix) so anything reading ``cursor.query_id`` + downstream sees a parseable shape.""" + c = _make_client() + cid = c._synthetic_command_id() + # 32-char lowercase hex + assert len(cid.guid) == 32 + int(cid.guid, 16) # raises if non-hex + + +def test_close_session_clears_async_handles_even_if_close_fails(): + """Per-handle close errors are logged but don't prevent the + rest of the close-session sweep from completing, and the dict + is cleared either way.""" + c = _make_client() + good = MagicMock() + bad = MagicMock() + bad.close.side_effect = _FakeKernelError(code="Unavailable") + c._async_handles["a"] = good + c._async_handles["b"] = bad + c._kernel_session = MagicMock() + c.close_session(MagicMock()) + assert c._async_handles == {} + assert good.close.called + assert bad.close.called diff --git a/tests/unit/test_kernel_result_set.py b/tests/unit/test_kernel_result_set.py new file mode 100644 index 000000000..c83bfce94 --- /dev/null +++ b/tests/unit/test_kernel_result_set.py @@ -0,0 +1,169 @@ +"""Unit tests for ``KernelResultSet`` — the buffer behavior + +close() semantics. Uses a fake kernel handle so tests run with no +network and no Rust extension dependency.""" + +from __future__ import annotations + +from collections import deque +from typing import Deque +from unittest.mock import MagicMock + +import pytest + +# pyarrow is an optional connector dep; the default-deps CI test +# job runs without it. KernelResultSet imports pyarrow eagerly, +# so the whole module must skip when pyarrow is unavailable. +pa = pytest.importorskip("pyarrow") + +from databricks.sql.backend.kernel.result_set import KernelResultSet +from databricks.sql.backend.types import CommandId, CommandState + + +class _FakeKernelHandle: + """Stand-in for ``databricks_sql_kernel.ExecutedStatement`` / + ``ResultStream``. 
Emits a configured list of ``RecordBatch``es + via ``fetch_next_batch`` and then returns ``None``.""" + + def __init__(self, schema: pa.Schema, batches): + self._schema = schema + self._batches: Deque[pa.RecordBatch] = deque(batches) + self.closed = False + + def arrow_schema(self) -> pa.Schema: + return self._schema + + def fetch_next_batch(self): + if self.closed: + raise RuntimeError("fetched after close") + if not self._batches: + return None + return self._batches.popleft() + + def close(self): + self.closed = True + + +def _make_rs(handle) -> KernelResultSet: + # The base ResultSet __init__ takes a `connection` ref it never + # actually dereferences during these buffer tests, so a Mock is + # fine. + connection = MagicMock() + backend = MagicMock() + return KernelResultSet( + connection=connection, + backend=backend, + kernel_handle=handle, + command_id=CommandId.from_sea_statement_id("smoke-test"), + arraysize=100, + buffer_size_bytes=1024, + ) + + +def _batch(schema: pa.Schema, values) -> pa.RecordBatch: + return pa.RecordBatch.from_arrays( + [pa.array(values, type=schema.field(0).type)], schema=schema + ) + + +# Renamed from `schema` -> `int_schema` because the connector's +# top-level conftest.py defines a session-scoped `schema` fixture +# for E2E tests; pytest's fixture-resolution complains about +# scope-mismatch if we shadow it with a function-scoped one here. +@pytest.fixture +def int_schema(): + return pa.schema([("n", pa.int64())]) + + +def test_description_built_from_kernel_schema(int_schema): + handle = _FakeKernelHandle(int_schema, []) + rs = _make_rs(handle) + assert rs.description == [("n", "bigint", None, None, None, None, None)] + + +def test_fetchall_arrow_drains_all_batches(int_schema): + handle = _FakeKernelHandle( + int_schema, [_batch(int_schema, [1, 2]), _batch(int_schema, [3, 4, 5])] + ) + rs = _make_rs(handle) + table = rs.fetchall_arrow() + assert table.num_rows == 5 + assert table.column(0).to_pylist() == [1, 2, 3, 4, 5] + assert rs.status == CommandState.SUCCEEDED + assert rs.has_more_rows is False + + +def test_fetchmany_arrow_slices_within_batch(int_schema): + handle = _FakeKernelHandle(int_schema, [_batch(int_schema, [10, 20, 30, 40])]) + rs = _make_rs(handle) + t1 = rs.fetchmany_arrow(2) + assert t1.num_rows == 2 and t1.column(0).to_pylist() == [10, 20] + t2 = rs.fetchmany_arrow(2) + assert t2.num_rows == 2 and t2.column(0).to_pylist() == [30, 40] + t3 = rs.fetchmany_arrow(2) + assert t3.num_rows == 0 + + +def test_fetchmany_arrow_spans_batch_boundary(int_schema): + handle = _FakeKernelHandle( + int_schema, + [_batch(int_schema, [1, 2]), _batch(int_schema, [3, 4]), _batch(int_schema, [5, 6])], + ) + rs = _make_rs(handle) + t = rs.fetchmany_arrow(5) + assert t.num_rows == 5 + assert t.column(0).to_pylist() == [1, 2, 3, 4, 5] + t = rs.fetchmany_arrow(2) + assert t.column(0).to_pylist() == [6] + + +def test_fetchone_returns_row_then_none(int_schema): + handle = _FakeKernelHandle(int_schema, [_batch(int_schema, [42])]) + rs = _make_rs(handle) + row = rs.fetchone() + assert row is not None + assert row[0] == 42 + assert rs.fetchone() is None + + +def test_fetchall_rows(int_schema): + handle = _FakeKernelHandle( + int_schema, [_batch(int_schema, [1, 2]), _batch(int_schema, [3])] + ) + rs = _make_rs(handle) + rows = rs.fetchall() + assert [r[0] for r in rows] == [1, 2, 3] + + +def test_fetchmany_negative_raises(int_schema): + rs = _make_rs(_FakeKernelHandle(int_schema, [])) + with pytest.raises(ValueError): + rs.fetchmany(-1) + with 
pytest.raises(ValueError):
+        rs.fetchmany_arrow(-1)
+
+
+def test_close_is_idempotent_and_calls_handle(int_schema):
+    handle = _FakeKernelHandle(int_schema, [_batch(int_schema, [1])])
+    rs = _make_rs(handle)
+    rs.close()
+    assert handle.closed is True
+    assert rs.status == CommandState.CLOSED
+    rs.close()  # second call is a no-op (kernel handle is None)
+
+
+def test_empty_stream(int_schema):
+    rs = _make_rs(_FakeKernelHandle(int_schema, []))
+    assert rs.fetchone() is None
+    assert rs.fetchall_arrow().num_rows == 0
+    assert rs.status == CommandState.SUCCEEDED
+
+
+def test_close_swallows_handle_close_failures(int_schema):
+    """ResultSet.close() must not raise even if the kernel
+    handle's close() fails; cursor/connection teardown calls
+    close() unconditionally, so an exception here would mask the
+    original error."""
+    handle = _FakeKernelHandle(int_schema, [])
+    handle.close = MagicMock(side_effect=RuntimeError("kernel boom"))
+    rs = _make_rs(handle)
+    rs.close()  # must not raise
+    assert rs.status == CommandState.CLOSED
diff --git a/tests/unit/test_kernel_type_mapping.py b/tests/unit/test_kernel_type_mapping.py
new file mode 100644
index 000000000..5ab5bde74
--- /dev/null
+++ b/tests/unit/test_kernel_type_mapping.py
@@ -0,0 +1,73 @@
+"""Unit tests for Arrow → PEP 249 description-string mapping."""
+
+from __future__ import annotations
+
+import pytest
+
+# pyarrow is an optional connector dep; the default-deps CI test
+# job runs without it. The kernel backend itself imports pyarrow
+# at module load, so any test that touches the backend must skip
+# when pyarrow is unavailable.
+pa = pytest.importorskip("pyarrow")
+
+from databricks.sql.backend.kernel.type_mapping import (
+    _arrow_type_to_dbapi_string,
+    description_from_arrow_schema,
+)
+
+
+@pytest.mark.parametrize(
+    "arrow_type, expected",
+    [
+        (pa.bool_(), "boolean"),
+        (pa.int8(), "tinyint"),
+        (pa.int16(), "smallint"),
+        (pa.int32(), "int"),
+        (pa.int64(), "bigint"),
+        (pa.float32(), "float"),
+        (pa.float64(), "double"),
+        (pa.decimal128(10, 2), "decimal"),
+        (pa.string(), "string"),
+        (pa.large_string(), "string"),
+        (pa.binary(), "binary"),
+        (pa.large_binary(), "binary"),
+        (pa.date32(), "date"),
+        (pa.timestamp("us"), "timestamp"),
+        (pa.list_(pa.int32()), "array"),
+        (pa.large_list(pa.int32()), "array"),
+        (pa.struct([("a", pa.int32())]), "struct"),
+        (pa.map_(pa.string(), pa.int32()), "map"),
+    ],
+)
+def test_arrow_to_dbapi_known_types(arrow_type, expected):
+    assert _arrow_type_to_dbapi_string(arrow_type) == expected
+
+
+def test_arrow_to_dbapi_unknown_falls_back_to_str():
+    # null type isn't in the explicit list but should fall through
+    # to the default str() so unknown variants are still printable
+    # rather than silently misclassified.
+    assert _arrow_type_to_dbapi_string(pa.null()) == "null"
+
+
+def test_description_from_schema_preserves_field_names_and_order():
+    schema = pa.schema(
+        [
+            ("user_id", pa.int64()),
+            ("name", pa.string()),
+            ("created_at", pa.timestamp("us")),
+        ]
+    )
+    desc = description_from_arrow_schema(schema)
+    assert len(desc) == 3
+    assert [(d[0], d[1]) for d in desc] == [
+        ("user_id", "bigint"),
+        ("name", "string"),
+        ("created_at", "timestamp"),
+    ]
+    # PEP 249 description entries are 7-tuples; the last five slots are
+    # None for the kernel backend (we don't report display_size /
+    # internal_size / precision / scale / nullability).
+    for d in desc:
+        assert len(d) == 7
+        assert d[2:] == (None, None, None, None, None)
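+
+
+# Illustrative sketch: the name-and-order guarantees verified above are
+# what let a generic DB-API consumer zip description names with row
+# tuples (``cursor`` below stands for any PEP 249 cursor, not a
+# kernel-specific object), e.g.
+#
+#     cols = [d[0] for d in cursor.description]
+#     dict_rows = [dict(zip(cols, row)) for row in cursor.fetchall()]
+#
+# The check below exercises the same pattern directly against
+# ``description_from_arrow_schema``.
+def test_description_names_usable_for_dict_rows():
+    schema = pa.schema([("a", pa.int32()), ("b", pa.string())])
+    cols = [d[0] for d in description_from_arrow_schema(schema)]
+    assert dict(zip(cols, (1, "x"))) == {"a": 1, "b": "x"}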