feat(01-03): integration tests for Slack flow, rate limiting, and agent persona
- tests/unit/test_ratelimit.py: 11 tests for Redis token bucket (CHAN-05)
  - allows requests under limit, rejects 31st request
  - per-tenant isolation, per-channel isolation
  - TTL key expiry and window reset
- tests/integration/test_slack_flow.py: 15 tests for end-to-end Slack flow (CHAN-02)
  - normalization: bot token stripped, channel=slack, thread_id set
  - @mention: placeholder posted in-thread, Celery dispatched with placeholder_ts
  - DM flow: same pipeline triggered for channel_type=im
  - bot messages silently ignored (no infinite loop)
  - unknown workspace_id silently ignored
  - duplicate events (Slack retries) skipped via idempotency
- tests/integration/test_agent_persona.py: 15 tests for persona in prompts (AGNT-01)
  - system prompt contains name, role, persona, AI transparency clause
  - model_preference forwarded to LLM pool
  - full messages array: [system, user] structure verified
- tests/integration/test_ratelimit.py: 4 tests for rate limit integration
  - over-limit -> ephemeral rejection posted
  - over-limit -> Celery NOT dispatched, placeholder NOT posted
  - within-limit -> no rejection
  - ephemeral message includes actionable retry hint

All 45 tests pass
This commit is contained in:
307
tests/integration/test_agent_persona.py
Normal file
307
tests/integration/test_agent_persona.py
Normal file
@@ -0,0 +1,307 @@
|
||||
"""
|
||||
Integration tests for agent persona reflection in LLM system prompts (AGNT-01).
|
||||
|
||||
Tests verify:
|
||||
1. The system prompt contains the agent's name, role, and persona
|
||||
2. The AI transparency clause is always present
|
||||
3. model_preference from the agent config is passed to the LLM pool
|
||||
4. The full message array (system + user) is correctly structured
|
||||
|
||||
These tests mock the LLM pool HTTP call — no real LLM API keys required.
|
||||
They test the orchestrator -> agent builder -> runner chain in isolation.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import uuid
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from orchestrator.agents.builder import build_messages, build_system_prompt
|
||||
|
||||
|
||||
class _MockAgent:
    """Stand-in for the Agent ORM model carrying only builder-visible fields.

    No database session or SQLAlchemy machinery is involved; tests construct
    these directly with whatever identity/persona values they need.
    """

    def __init__(
        self,
        name: str,
        role: str,
        persona: str,
        system_prompt: str = "",
        model_preference: str = "quality",
    ) -> None:
        # Fresh random ids so each fake agent looks like a distinct row
        # under its own tenant.
        self.id = uuid.uuid4()
        self.tenant_id = uuid.uuid4()
        # Identity and persona fields read when composing the system prompt.
        self.name = name
        self.role = role
        self.persona = persona
        self.system_prompt = system_prompt
        # Routing hint expected to surface as the "model" field in the
        # LLM pool request payload.
        self.model_preference = model_preference
        self.is_active = True
|
||||
|
||||
|
||||
class TestAgentPersonaInSystemPrompt:
    """AGNT-01: Agent identity and persona must appear in the system prompt."""

    def test_agent_name_in_system_prompt(self) -> None:
        """System prompt must contain 'Your name is {agent.name}'."""
        agent = _MockAgent(name="Mara", role="Customer Support", persona="Professional and empathetic")
        prompt = build_system_prompt(agent)
        assert "Mara" in prompt
        assert "Your name is Mara" in prompt

    def test_agent_role_in_system_prompt(self) -> None:
        """System prompt must contain the agent's role."""
        agent = _MockAgent(name="Mara", role="Customer Support", persona="Professional and empathetic")
        prompt = build_system_prompt(agent)
        assert "Customer Support" in prompt
        assert "Your role is Customer Support" in prompt

    def test_agent_persona_in_system_prompt(self) -> None:
        """System prompt must include the agent's persona text."""
        agent = _MockAgent(
            name="Mara",
            role="Customer Support",
            persona="Professional and empathetic",
        )
        prompt = build_system_prompt(agent)
        assert "Professional and empathetic" in prompt

    def test_ai_transparency_clause_always_present(self) -> None:
        """
        The AI transparency clause must be present in every system prompt,
        regardless of agent configuration.

        Agents must acknowledge they are AIs when directly asked.
        """
        agent = _MockAgent(name="Mara", role="Support", persona="")
        prompt = build_system_prompt(agent)
        # The clause uses the word "AI" — verify it's unconditionally injected
        assert "AI" in prompt or "artificial intelligence" in prompt.lower()
        # Verify the specific phrase from builder.py.  (A previous version also
        # tested `"you are an AI" in prompt.lower()`, which can never match —
        # a lowercased string contains no uppercase "AI" — so only the
        # all-lowercase form is a live check.)
        assert "you are an ai" in prompt.lower()

    def test_ai_transparency_present_even_with_empty_persona(self) -> None:
        """Transparency clause must appear even when persona is empty."""
        agent = _MockAgent(name="Bot", role="Assistant", persona="")
        prompt = build_system_prompt(agent)
        assert "AI" in prompt

    def test_custom_system_prompt_included(self) -> None:
        """If agent has a base system_prompt, it must appear in the output."""
        agent = _MockAgent(
            name="Mara",
            role="Support",
            persona="Helpful",
            system_prompt="Always be concise.",
        )
        prompt = build_system_prompt(agent)
        assert "Always be concise." in prompt

    def test_full_persona_customer_support_scenario(self) -> None:
        """
        Full system prompt for a 'Mara' customer support agent must contain
        all required elements.
        """
        agent = _MockAgent(
            name="Mara",
            role="Customer Support",
            persona="Professional, empathetic, solution-oriented.",
        )
        prompt = build_system_prompt(agent)

        assert "Mara" in prompt
        assert "Customer Support" in prompt
        assert "Professional, empathetic, solution-oriented." in prompt
        assert "AI" in prompt  # Transparency clause

    def test_name_and_role_on_same_line(self) -> None:
        """Name and role must appear together in the identity sentence."""
        agent = _MockAgent(name="Atlas", role="DevOps Engineer", persona="")
        prompt = build_system_prompt(agent)
        assert "Your name is Atlas. Your role is DevOps Engineer." in prompt
|
||||
|
||||
|
||||
class TestAgentPersonaInMessages:
    """Verify the full messages array structure passed to the LLM pool."""

    def test_messages_has_system_message_first(self) -> None:
        """The first message must be the system message."""
        agent = _MockAgent(name="Mara", role="Support", persona="Helpful")
        sys_prompt = build_system_prompt(agent)
        msgs = build_messages(system_prompt=sys_prompt, user_message="Hello")
        head = msgs[0]
        assert head["role"] == "system"
        assert head["content"] == sys_prompt

    def test_messages_has_user_message_last(self) -> None:
        """The last message must be the user message."""
        agent = _MockAgent(name="Mara", role="Support", persona="Helpful")
        sys_prompt = build_system_prompt(agent)
        question = "Can you help with my order?"
        msgs = build_messages(system_prompt=sys_prompt, user_message=question)
        tail = msgs[-1]
        assert tail["role"] == "user"
        assert tail["content"] == question

    def test_messages_has_exactly_two_entries_no_history(self) -> None:
        """Without history, messages must have exactly [system, user]."""
        agent = _MockAgent(name="Mara", role="Support", persona="Helpful")
        msgs = build_messages(
            system_prompt=build_system_prompt(agent),
            user_message="Hi",
        )
        assert len(msgs) == 2

    def test_messages_includes_history_in_order(self) -> None:
        """Conversation history must appear between system and user messages."""
        agent = _MockAgent(name="Mara", role="Support", persona="Helpful")
        prior_turns = [
            {"role": "user", "content": "Previous question"},
            {"role": "assistant", "content": "Previous answer"},
        ]
        msgs = build_messages(
            system_prompt=build_system_prompt(agent),
            user_message="Follow-up",
            history=prior_turns,
        )
        # Expected layout: system, prior_turns[0], prior_turns[1], user
        assert len(msgs) == 4
        assert msgs[1:3] == prior_turns
        assert msgs[-1]["role"] == "user"
|
||||
|
||||
|
||||
class TestModelPreferencePassthrough:
    """Verify model_preference is passed correctly to the LLM pool.

    NOTE(review): these `async def` tests carry no @pytest.mark.asyncio
    marker — presumably pytest-asyncio runs in auto mode; confirm in the
    pytest configuration.
    """

    @staticmethod
    def _make_message(agent, text, *, metadata=None, display_name="Test"):
        """Build a minimal Slack-channel KonstructMessage for *agent*'s tenant."""
        from datetime import datetime, timezone

        from shared.models.message import ChannelType, KonstructMessage, MessageContent, SenderInfo

        return KonstructMessage(
            tenant_id=str(agent.tenant_id),
            channel=ChannelType.SLACK,
            channel_metadata=metadata if metadata is not None else {},
            sender=SenderInfo(user_id="U1", display_name=display_name),
            content=MessageContent(text=text),
            timestamp=datetime.now(tz=timezone.utc),
        )

    @staticmethod
    def _ok_response(content: str, model: str) -> MagicMock:
        """Fake httpx response: status 200 with the given completion payload."""
        resp = MagicMock()
        resp.status_code = 200
        resp.json.return_value = {"content": content, "model": model}
        return resp

    @staticmethod
    def _client_with_post(post: AsyncMock) -> AsyncMock:
        """AsyncMock usable as `async with httpx.AsyncClient() as c`, posting via *post*."""
        client = AsyncMock()
        client.__aenter__ = AsyncMock(return_value=client)
        client.__aexit__ = AsyncMock(return_value=False)
        client.post = post
        return client

    async def test_model_preference_passed_to_llm_pool(self) -> None:
        """
        The agent's model_preference must be forwarded as the 'model' field
        in the LLM pool /complete request payload.
        """
        from orchestrator.agents.runner import run_agent

        agent = _MockAgent(
            name="Mara",
            role="Customer Support",
            persona="Professional and empathetic",
            model_preference="quality",
        )
        msg = self._make_message(
            agent,
            "Hello Mara",
            metadata={"workspace_id": "T-TEST"},
            display_name="Test User",
        )

        captured_payloads: list[dict] = []

        async def mock_post_response(*args, **kwargs):
            # Record the JSON payload the runner sends to the pool.
            captured_payloads.append(kwargs.get("json", {}))
            return self._ok_response("Hello from Mara!", "quality")

        with patch("httpx.AsyncClient") as mock_http_class:
            mock_http_class.return_value = self._client_with_post(
                AsyncMock(side_effect=mock_post_response)
            )
            await run_agent(msg, agent)

        assert len(captured_payloads) == 1
        assert captured_payloads[0]["model"] == "quality"

    async def test_llm_response_returned_as_string(self) -> None:
        """run_agent must return the LLM response as a plain string."""
        from orchestrator.agents.runner import run_agent

        agent = _MockAgent(
            name="Mara",
            role="Support",
            persona="Helpful",
            model_preference="fast",
        )
        msg = self._make_message(agent, "What is 2+2?")

        with patch("httpx.AsyncClient") as mock_http_class:
            mock_http_class.return_value = self._client_with_post(
                AsyncMock(return_value=self._ok_response("The answer is 4.", "fast"))
            )
            result = await run_agent(msg, agent)

        assert isinstance(result, str)
        assert result == "The answer is 4."

    async def test_system_prompt_forwarded_to_llm_pool(self) -> None:
        """
        The system prompt (including persona + AI clause) must be the first
        message in the array sent to the LLM pool.
        """
        from orchestrator.agents.runner import run_agent

        agent = _MockAgent(
            name="Mara",
            role="Customer Support",
            persona="Professional and empathetic",
        )
        msg = self._make_message(agent, "hi")

        captured_messages: list = []

        async def capture_request(*args, **kwargs):
            captured_messages.extend(kwargs.get("json", {}).get("messages", []))
            return self._ok_response("Hello!", "quality")

        with patch("httpx.AsyncClient") as mock_http_class:
            mock_http_class.return_value = self._client_with_post(
                AsyncMock(side_effect=capture_request)
            )
            await run_agent(msg, agent)

        assert len(captured_messages) >= 2
        system_msg = captured_messages[0]
        assert system_msg["role"] == "system"
        # System prompt must contain all persona elements
        assert "Mara" in system_msg["content"]
        assert "Customer Support" in system_msg["content"]
        assert "Professional and empathetic" in system_msg["content"]
        assert "AI" in system_msg["content"]  # Transparency clause
|
||||
Reference in New Issue
Block a user