Skip to content
Merged
Show file tree
Hide file tree
Changes from 27 commits
Commits
Show all changes
44 commits
Select commit Hold shift + click to select a range
9afdefb
add webhook ux improvement
Cristhianzl Dec 15, 2025
1f1ea4b
merge fix
Cristhianzl Dec 15, 2025
e40b8c3
revert starter_projects
Cristhianzl Dec 15, 2025
5747f4f
improve code quality
Cristhianzl Dec 15, 2025
0a3c810
test(webhook): add delay to test for background task completion after…
Cristhianzl Dec 15, 2025
4586406
style(endpoints.py): reorder import statements for better organizatio…
Cristhianzl Dec 15, 2025
b74e4f4
Merge remote-tracking branch 'origin/main' into cz/webhook-ux-v2
Cristhianzl Dec 15, 2025
a20bd3a
fix: regenerate package-lock.json to fix npm ci sync issue
Cristhianzl Dec 15, 2025
5e4cf95
[autofix.ci] apply automated fixes
autofix-ci[bot] Dec 15, 2025
0e373a2
[autofix.ci] apply automated fixes (attempt 2/3)
autofix-ci[bot] Dec 15, 2025
b86e3ba
[autofix.ci] apply automated fixes (attempt 3/3)
autofix-ci[bot] Dec 15, 2025
d9e0943
merge fix
Cristhianzl Jan 15, 2026
1267c20
restore lock
Cristhianzl Jan 15, 2026
0c572ca
merge main
Cristhianzl Jan 15, 2026
337ce22
rollback changes
Cristhianzl Jan 15, 2026
8ed49f5
[autofix.ci] apply automated fixes
autofix-ci[bot] Jan 15, 2026
8cd6514
Merge branch 'cz/webhook-ux-v2' of github.com:langflow-ai/langflow in…
Cristhianzl Jan 15, 2026
55dec06
[autofix.ci] apply automated fixes
autofix-ci[bot] Jan 15, 2026
8ff6c09
refactor for good practices
Cristhianzl Jan 15, 2026
ee56d5c
improve tests perf
Cristhianzl Jan 15, 2026
c5ba929
Merge branch 'cz/webhook-ux-v2' of github.com:langflow-ai/langflow in…
Cristhianzl Jan 15, 2026
3588a9d
[autofix.ci] apply automated fixes
autofix-ci[bot] Jan 15, 2026
2eddde4
fix sse events frontend
Cristhianzl Jan 16, 2026
5f12e4d
[autofix.ci] apply automated fixes
autofix-ci[bot] Jan 16, 2026
dfba94f
merge fix
Cristhianzl Jan 20, 2026
6b4fe25
[autofix.ci] apply automated fixes
autofix-ci[bot] Jan 20, 2026
ecf6ca9
[autofix.ci] apply automated fixes (attempt 2/3)
autofix-ci[bot] Jan 20, 2026
3336ce9
add output sse event
Cristhianzl Jan 20, 2026
a31a9b8
merge fix
Cristhianzl Jan 20, 2026
04349a7
merge fix
Cristhianzl Jan 20, 2026
e17f0dc
[autofix.ci] apply automated fixes
autofix-ci[bot] Jan 20, 2026
924bbd9
[autofix.ci] apply automated fixes (attempt 2/3)
autofix-ci[bot] Jan 20, 2026
047fb89
mypy fixes
Cristhianzl Jan 20, 2026
a286fa9
remove noisy debug logs
Cristhianzl Jan 20, 2026
085edff
remove sse when we have no webhook on canvas
Cristhianzl Jan 20, 2026
5f202ef
Merge branch 'main' into cz/webhook-ux-v2
carlosrcoelho Jan 21, 2026
f0572e6
Merge branch 'main' into cz/webhook-ux-v2
Cristhianzl Jan 25, 2026
75b3d09
add get lock for CI improvement
Cristhianzl Jan 25, 2026
1807633
[autofix.ci] apply automated fixes
autofix-ci[bot] Jan 25, 2026
cf4e673
fix tests loading
Cristhianzl Jan 25, 2026
6560323
Merge branch 'cz/webhook-ux-v2' of github.com:langflow-ai/langflow in…
Cristhianzl Jan 25, 2026
4f6a2f2
fix flaky test py
Cristhianzl Jan 26, 2026
75d3f22
ruff style and check
Cristhianzl Jan 26, 2026
4c45305
test: skip flaky test on CI due to resource exhaustion
Cristhianzl Jan 26, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
136 changes: 121 additions & 15 deletions src/backend/base/langflow/api/v1/endpoints.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from __future__ import annotations

import asyncio
import json
import time
from collections.abc import AsyncGenerator
from http import HTTPStatus
Expand Down Expand Up @@ -50,6 +51,7 @@
from langflow.services.database.models.flow.utils import get_all_webhook_components_in_flow
from langflow.services.database.models.user.model import User, UserRead
from langflow.services.deps import get_session_service, get_settings_service, get_telemetry_service
from langflow.services.event_manager import webhook_event_manager
from langflow.services.telemetry.schema import RunPayload
from langflow.utils.compression import compress_response
from langflow.utils.version import get_version_info
Expand All @@ -59,6 +61,9 @@

router = APIRouter(tags=["Base"])

# SSE Constants
SSE_HEARTBEAT_TIMEOUT_SECONDS = 30.0


async def parse_input_request_from_body(http_request: Request) -> SimplifiedAPIRequest:
"""Parse SimplifiedAPIRequest from HTTP request body.
Expand Down Expand Up @@ -195,6 +200,13 @@
raise ValueError(str(exc)) from exc


def _get_vertex_ids_from_flow(flow: Flow) -> list[str]:
"""Extract vertex IDs from flow data."""
if not flow.data or not flow.data.get("nodes"):
return []
return [node.get("id") for node in flow.data.get("nodes", []) if node.get("id")]
Comment on lines +210 to +212
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚡️Codeflash found 27% (0.27x) speedup for _get_vertex_ids_from_flow in src/backend/base/langflow/api/v1/endpoints.py

⏱️ Runtime : 477 microseconds 374 microseconds (best of 117 runs)

📝 Explanation and details

The optimized code achieves a 27% speedup primarily through two key changes:

1. Eliminated Redundant Dictionary Lookups
The original code calls flow.data.get("nodes") twice - once in the condition check and again in the list comprehension. The optimized version stores flow.data.get("nodes") in a nodes variable, cutting this expensive dictionary access in half.

2. Used Walrus Operator to Cache Node IDs
The original list comprehension calls node.get("id") twice per node - once to check truthiness and once to add to the result list. The optimized version uses the walrus operator (:=) to assign node.get("id") to node_id in a single operation, then reuses that value. This eliminates one dictionary lookup per node.

Why This Matters
Dictionary lookups in Python involve hash computation and collision resolution, making them relatively expensive operations. When processing flows with many nodes (the test suite validates up to 999 nodes), these redundant lookups compound significantly. The line profiler shows the list comprehension time dropped from 616,217ns to 541,050ns (12% faster), confirming the per-node optimization impact.

Test Case Performance
The optimization benefits all test cases, but particularly shines with:

  • Large flows (999 nodes test): Maximum benefit from per-node savings
  • Mixed valid/invalid nodes: Reduced overhead on validation checks
  • Standard flows with 3+ nodes: Consistent speedup across typical workloads

The changes preserve all behavior including edge cases (empty data, missing nodes, non-dict entries) while delivering measurable performance gains.

Correctness verification report:

Test Status
⚙️ Existing Unit Tests 17 Passed
🌀 Generated Regression Tests 11 Passed
⏪ Replay Tests 🔘 None Found
🔎 Concolic Coverage Tests 🔘 None Found
📊 Tests Coverage 100.0%
⚙️ Click to see Existing Unit Tests
🌀 Click to see Generated Regression Tests

from dataclasses import dataclass
from typing import Any, Dict, List, Optional

imports

import pytest # used for our unit tests
from langflow.api.v1.endpoints import _get_vertex_ids_from_flow

function to test

The original function referenced Flow from an external module. For the purposes

of unit testing in this self-contained file we define a minimal Flow dataclass

that mirrors the shape the function expects: an object with a data attribute

that may be a dict with a "nodes" list.

@dataclass
class Flow:
    data: Optional[Dict[str, Any]] = None
from langflow.api.v1.endpoints import _get_vertex_ids_from_flow

unit tests

Basic Test Cases

def test_basic_returns_ids_in_order():
"""
Basic functionality: nodes with valid 'id' strings should be returned in order.
"""
# Create a flow with three nodes each having an 'id'
flow = Flow(data={"nodes": [{"id": "a"}, {"id": "b"}, {"id": "c"}]})
codeflash_output = _get_vertex_ids_from_flow(flow); result = codeflash_output

def test_ignores_nodes_without_id_or_empty_id():
"""
Nodes missing 'id' or with falsy ids (empty string, None, 0) should be ignored.
"""
# Mix nodes: valid ids, missing id, empty string, None, numeric 0 (falsy), and another valid id
nodes = [
{"id": "1"}, # included
{"other": "x"}, # missing 'id' -> ignored
{"id": ""}, # empty string -> falsy -> ignored
{"id": None}, # None -> falsy -> ignored
{"id": 0}, # 0 -> falsy -> ignored
{"id": "2"}, # included
]
flow = Flow(data={"nodes": nodes})
codeflash_output = _get_vertex_ids_from_flow(flow); result = codeflash_output

def test_handles_empty_nodes_list_and_missing_nodes_key():
"""
When nodes is an empty list -> return empty list.
When data exists but 'nodes' missing -> return empty list.
When data is falsy (None, empty dict) -> return empty list.
"""
# Empty nodes list
flow_empty_nodes = Flow(data={"nodes": []})
codeflash_output = _get_vertex_ids_from_flow(flow_empty_nodes)

# Missing 'nodes' key
flow_no_nodes_key = Flow(data={"something_else": 123})
codeflash_output = _get_vertex_ids_from_flow(flow_no_nodes_key)

# data is None (falsy)
flow_data_none = Flow(data=None)
codeflash_output = _get_vertex_ids_from_flow(flow_data_none)

# data is empty dict (falsy in `if not flow.data`)
flow_data_empty = Flow(data={})
codeflash_output = _get_vertex_ids_from_flow(flow_data_empty)

Edge Test Cases

def test_calling_with_none_flow_raises_attribute_error():
"""
Passing None instead of a Flow should raise AttributeError when the function
attempts to access flow.data. This documents current behavior for invalid input.
"""
with pytest.raises(AttributeError):
# Not a Flow instance -> accessing .data should raise
_get_vertex_ids_from_flow(None) # type: ignore[arg-type]

def test_nodes_with_non_dict_entries_raise_attribute_error():
"""
If nodes list contains elements that are not dict-like (no .get), the function
will attempt to call .get and raise AttributeError. Confirm this behavior.
"""
# Put a non-dict as the first node to ensure the list comprehension hits it and raises.
flow = Flow(data={"nodes": ["not-a-dict", {"id": "ok"}]})
with pytest.raises(AttributeError):
_get_vertex_ids_from_flow(flow)

def test_includes_truthy_non_string_ids_and_preserves_duplicates_and_order():
"""
The function does not coerce types. If an id is a truthy non-string (e.g., True),
it will be included as-is. Duplicate ids should be preserved and order kept.
"""
nodes = [
{"id": True}, # truthy boolean -> included
{"id": "x"}, # included
{"id": "x"}, # duplicate included again
{"id": "0"}, # string "0" is truthy -> included
]
flow = Flow(data={"nodes": nodes})
codeflash_output = _get_vertex_ids_from_flow(flow); result = codeflash_output

Large Scale Test Cases

def test_large_scale_near_thousand_nodes_performance_and_correctness():
"""
Large-scale test: create 999 nodes (just under 1000 per constraints) and ensure
all ids are returned in correct order and the function completes quickly.
This verifies scalability within the given limit.
"""
n = 999 # keep under 1000 as requested
# Build nodes with predictable ids "node_0", "node_1", ..., "node_998"
nodes = [{"id": f"node_{i}"} for i in range(n)]
flow = Flow(data={"nodes": nodes})
codeflash_output = _get_vertex_ids_from_flow(flow); result = codeflash_output

def test_mixed_valid_and_invalid_nodes_in_large_list_stops_on_type_error_when_first_bad():
"""
Ensure that when a large list begins with a malformed node (non-dict), the function
raises immediately (and does not silently skip) -- preserving documented behavior.
We only place one non-dict at the start to force an immediate failure.
"""
n = 500 # moderately large but under constraints
# Start with a bad element to trigger AttributeError, followed by many valid nodes
nodes = ["bad-start"] + [{"id": f"node_{i}"} for i in range(n)]
flow = Flow(data={"nodes": nodes})
with pytest.raises(AttributeError):
_get_vertex_ids_from_flow(flow)

codeflash_output is used to check that the output of the original code is the same as that of the optimized code.

#------------------------------------------------
from unittest.mock import MagicMock, Mock

imports

import pytest
from langflow.api.v1.endpoints import _get_vertex_ids_from_flow
from langflow.services.database.models.flow.model import Flow

============================================================================

BASIC TEST CASES

============================================================================

To test or edit this optimization locally git merge codeflash/optimize-pr11028-2026-01-15T21.10.46

Suggested change
if not flow.data or not flow.data.get("nodes"):
return []
return [node.get("id") for node in flow.data.get("nodes", []) if node.get("id")]
if not flow.data:
return []
nodes = flow.data.get("nodes")
if not nodes:
return []
return [node_id for node in nodes if (node_id := node.get("id"))]



async def simple_run_flow_task(
flow: Flow,
input_request: SimplifiedAPIRequest,
Expand All @@ -205,9 +217,34 @@
telemetry_service=None,
start_time: float | None = None,
run_id: str | None = None,
emit_events: bool = False,
flow_id: str | None = None,
):
"""Run a flow task as a BackgroundTask, therefore it should not throw exceptions."""
"""Run a flow task as a BackgroundTask, therefore it should not throw exceptions.

Args:
flow: The flow to execute
input_request: The simplified API request
stream: Whether to stream results
api_key_user: The user executing the flow
event_manager: Event manager for streaming
telemetry_service: Service for logging telemetry
start_time: Start time for duration calculation
run_id: Unique ID for this run
emit_events: Whether to emit events to webhook_event_manager (for UI feedback)
flow_id: Flow ID for event emission (required if emit_events=True)
"""
should_emit = emit_events and flow_id

try:
if should_emit:
vertex_ids = _get_vertex_ids_from_flow(flow)
await webhook_event_manager.emit(
flow_id,
"vertices_sorted",
{"ids": vertex_ids, "to_run": vertex_ids, "run_id": run_id},
)

result = await simple_run_flow(
flow=flow,
input_request=input_request,
Expand All @@ -216,6 +253,10 @@
event_manager=event_manager,
run_id=run_id,
)

if should_emit:
await webhook_event_manager.emit(flow_id, "end", {"run_id": run_id, "success": True})

if telemetry_service and start_time is not None:
await telemetry_service.log_package_run(
RunPayload(
Expand All @@ -230,6 +271,10 @@

except Exception as exc: # noqa: BLE001
await logger.aexception(f"Error running flow {flow.id} task")

if should_emit:
await webhook_event_manager.emit(flow_id, "end", {"run_id": run_id, "success": False, "error": str(exc)})

if telemetry_service and start_time is not None:
await telemetry_service.log_package_run(
RunPayload(
Expand Down Expand Up @@ -608,23 +653,73 @@
)


@router.get("/webhook-events/{flow_id_or_name}")
async def webhook_events_stream(
    flow_id_or_name: str,  # noqa: ARG001 -- declares the path parameter consumed by the dependency
    flow: Annotated[Flow, Depends(get_flow_by_id_or_endpoint_name)],
    request: Request,
):
    """Server-Sent Events (SSE) endpoint for real-time webhook build updates.

    When a flow is open in the UI, this endpoint provides live feedback
    of webhook execution progress, similar to clicking "Play" in the UI.

    Note: Authentication is handled by get_flow_by_id_or_endpoint_name which
    validates the user has access to the flow.
    """

    async def event_generator() -> AsyncGenerator[str, None]:
        """Generate SSE events from the webhook event manager."""
        flow_id_str = str(flow.id)
        queue = await webhook_event_manager.subscribe(flow_id_str)

        try:
            # Send an initial event so the client knows the stream is live.
            yield f"event: connected\ndata: {json.dumps({'flow_id': flow_id_str, 'flow_name': flow.name})}\n\n"

            while True:
                if await request.is_disconnected():
                    break

                try:
                    event = await asyncio.wait_for(queue.get(), timeout=SSE_HEARTBEAT_TIMEOUT_SECONDS)
                    event_type = event["event"]
                    event_data = json.dumps(event["data"])
                    yield f"event: {event_type}\ndata: {event_data}\n\n"
                except asyncio.TimeoutError:
                    # No event within the window: emit a heartbeat so proxies
                    # and clients don't time out the idle connection.
                    yield f"event: heartbeat\ndata: {json.dumps({'timestamp': time.time()})}\n\n"

        except asyncio.CancelledError:
            # Client disconnected or server shutting down; fall through to cleanup.
            pass
        finally:
            await webhook_event_manager.unsubscribe(flow_id_str, queue)

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            # Disable reverse-proxy (nginx) buffering so events flush immediately.
            "X-Accel-Buffering": "no",
        },
    )


@router.post("/webhook/{flow_id_or_name}", response_model=dict, status_code=HTTPStatus.ACCEPTED) # noqa: RUF100, FAST003
async def webhook_run_flow(
flow_id_or_name: str,
flow: Annotated[Flow, Depends(get_flow_by_id_or_endpoint_name)],
request: Request,
background_tasks: BackgroundTasks,
):
"""Run a flow using a webhook request.

Args:
flow_id_or_name (str): The flow ID or endpoint name.
flow (Flow): The flow to be executed.
request (Request): The incoming HTTP request.
background_tasks (BackgroundTasks): The background tasks manager.
flow_id_or_name: The flow ID or endpoint name (used by dependency).
flow: The flow to be executed.
request: The incoming HTTP request.

Returns:
dict: A dictionary containing the status of the task.
A dictionary containing the status of the task.

Raises:
HTTPException: If the flow is not found or if there is an error processing the request.
Expand Down Expand Up @@ -662,17 +757,28 @@
session_id=None,
)

# Check if there are UI listeners connected via SSE
flow_id_str = str(flow.id)
has_ui_listeners = webhook_event_manager.has_listeners(flow_id_str)

await logger.adebug("Starting background task")
run_id = str(uuid4())
background_tasks.add_task(
simple_run_flow_task,
flow=flow,
input_request=input_request,
api_key_user=webhook_user,
telemetry_service=telemetry_service,
start_time=start_time,
run_id=run_id,

# Use asyncio.create_task to run in same event loop (needed for SSE)
background_task = asyncio.create_task(
simple_run_flow_task(
flow=flow,
input_request=input_request,
api_key_user=webhook_user,
telemetry_service=telemetry_service,
start_time=start_time,
run_id=run_id,
emit_events=has_ui_listeners,
flow_id=flow_id_str,
)
)
# Fire-and-forget: log exceptions but don't block
background_task.add_done_callback(lambda t: t.exception() if not t.cancelled() else None)
except Exception as exc:
error_msg = str(exc)
raise HTTPException(status_code=500, detail=error_msg) from exc
Expand Down
168 changes: 168 additions & 0 deletions src/backend/base/langflow/services/event_manager.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,168 @@
"""Event Manager for Webhook Real-Time Updates.

This module provides an in-memory event broadcasting system for webhook builds.
When a UI is connected via SSE, it receives real-time build events.
"""

from __future__ import annotations

import asyncio
import time
from collections import defaultdict
from typing import Any

from loguru import logger

# Constants
# Upper bound on buffered events per SSE subscriber queue; an emit to a full
# queue times out and the event is dropped (slow-consumer protection).
SSE_QUEUE_MAX_SIZE = 100
# How long emit() waits for space in a subscriber's queue before dropping.
SSE_EMIT_TIMEOUT_SECONDS = 1.0
# Threshold used by _format_duration to switch between "N.N s" and "Mm N.Ns".
SECONDS_PER_MINUTE = 60


class WebhookEventManager:
    """Manages SSE connections and broadcasts build events for webhooks.

    When a flow is open in the UI, it subscribes to webhook events.
    When a webhook is triggered, events are emitted to all subscribers.

    This provides the same visual experience as clicking "Play" in the UI,
    but triggered by external webhook calls.
    """

    def __init__(self):
        """Initialize the event manager with empty listeners."""
        # flow_id -> set of per-subscriber event queues
        self._listeners: dict[str, set[asyncio.Queue]] = defaultdict(set)
        # flow_id -> {vertex_id -> wall-clock start timestamp}
        self._vertex_start_times: dict[str, dict[str, float]] = defaultdict(dict)
        # Serializes mutations of _listeners across concurrent coroutines.
        self._lock = asyncio.Lock()

    def record_build_start(self, flow_id: str, vertex_id: str) -> None:
        """Record when a vertex build starts for duration calculation."""
        self._vertex_start_times[flow_id][vertex_id] = time.time()

    def get_build_duration(self, flow_id: str, vertex_id: str) -> str | None:
        """Get the formatted build duration for a vertex.

        The recorded start time is consumed (removed) by this call.

        Returns:
            A human-readable duration string, or None when no start time was
            recorded for this flow/vertex pair.
        """
        start_time = self._vertex_start_times.get(flow_id, {}).get(vertex_id)
        if start_time is None:
            return None
        elapsed = time.time() - start_time
        # Clean up the consumed entry, and drop the per-flow dict once it is
        # empty so _vertex_start_times doesn't accumulate empty dicts forever
        # as distinct flow IDs come and go.
        flow_times = self._vertex_start_times[flow_id]
        flow_times.pop(vertex_id, None)
        if not flow_times:
            self._vertex_start_times.pop(flow_id, None)
        return self._format_duration(elapsed)

    @staticmethod
    def _format_duration(seconds: float) -> str:
        """Format duration in a human-readable way (ms, s, or m+s)."""
        if seconds < 1:
            return f"{int(seconds * 1000)} ms"
        if seconds < SECONDS_PER_MINUTE:
            return f"{seconds:.1f} s"
        minutes = int(seconds // SECONDS_PER_MINUTE)
        secs = seconds % SECONDS_PER_MINUTE
        return f"{minutes}m {secs:.1f}s"

    async def subscribe(self, flow_id: str) -> asyncio.Queue:
        """Subscribe to receive events for a specific flow.

        Args:
            flow_id: The flow ID to subscribe to

        Returns:
            Queue that will receive events for this flow
        """
        # Bounded queue: slow consumers cause dropped events, not unbounded memory.
        queue: asyncio.Queue = asyncio.Queue(maxsize=SSE_QUEUE_MAX_SIZE)
        async with self._lock:
            self._listeners[flow_id].add(queue)
            listener_count = len(self._listeners[flow_id])

        logger.info(f"New subscriber for flow {flow_id}. Total listeners: {listener_count}")
        return queue

    async def unsubscribe(self, flow_id: str, queue: asyncio.Queue) -> None:
        """Unsubscribe from flow events.

        Args:
            flow_id: The flow ID to unsubscribe from
            queue: The queue to remove
        """
        async with self._lock:
            if flow_id in self._listeners:
                self._listeners[flow_id].discard(queue)
                listener_count = len(self._listeners[flow_id])

                # Clean up empty sets so has_listeners() stays accurate.
                if not self._listeners[flow_id]:
                    del self._listeners[flow_id]
                    logger.info(f"All subscribers disconnected for flow {flow_id}")
                else:
                    logger.info(f"Subscriber disconnected from flow {flow_id}. Remaining: {listener_count}")

    async def emit(self, flow_id: str, event_type: str, data: Any) -> None:
        """Emit an event to all subscribers of a flow.

        Args:
            flow_id: The flow ID to emit to
            event_type: Type of event (build_start, end_vertex, etc.)
            data: Event data (will be JSON serialized)
        """
        # Snapshot the listener set under the lock; deliveries happen outside
        # it so a slow queue can't block subscribe/unsubscribe.
        async with self._lock:
            listeners = self._listeners.get(flow_id, set()).copy()

        if not listeners:
            # No one listening, skip emission (performance optimization)
            return

        logger.debug(f"Emitting {event_type} to {len(listeners)} listeners for flow {flow_id}")

        # Prepare event
        event = {
            "event": event_type,
            "data": data,
            "timestamp": time.time(),
        }

        # Send to all queues
        dead_queues: set[asyncio.Queue] = set()

        for queue in listeners:
            try:
                await asyncio.wait_for(queue.put(event), timeout=SSE_EMIT_TIMEOUT_SECONDS)
            except asyncio.TimeoutError:
                # Queue is full (slow consumer), skip this event
                logger.warning(f"Queue full for flow {flow_id}, dropping event {event_type}")
            except Exception as e:  # noqa: BLE001
                # Queue is closed or broken, mark for removal
                logger.error(f"Error putting event in queue for flow {flow_id}: {e}")
                dead_queues.add(queue)

        # Clean up dead queues
        if dead_queues:
            async with self._lock:
                if flow_id in self._listeners:
                    self._listeners[flow_id] -= dead_queues
                    if not self._listeners[flow_id]:
                        del self._listeners[flow_id]

    def has_listeners(self, flow_id: str) -> bool:
        """Check if there are any active listeners for a flow."""
        return flow_id in self._listeners and len(self._listeners[flow_id]) > 0


# Module-level singleton holder, created lazily by get_webhook_event_manager().
# NOTE(review): the `webhook_event_manager` alias below binds at import time,
# so reassigning _webhook_event_manager later does NOT affect code that
# imported the alias directly — test overrides must patch the alias itself.
# TODO: Consider migrating to langflow's service manager pattern for better DI
_webhook_event_manager: WebhookEventManager | None = None


def get_webhook_event_manager() -> WebhookEventManager:
    """Get the webhook event manager instance, creating it on first call.

    Returns:
        The WebhookEventManager singleton instance.
    """
    global _webhook_event_manager  # noqa: PLW0603
    if _webhook_event_manager is None:
        _webhook_event_manager = WebhookEventManager()
    return _webhook_event_manager


# Backwards compatibility alias (eagerly instantiates the singleton at import)
webhook_event_manager = get_webhook_event_manager()
Loading
Loading