local_swarm/tests/test_tool_trigger.py
Commit dcca89d89a by sleepy: fix: OpenAI API compatibility for hollama and other clients
- Fixed ChatMessage.tool_calls to be Optional with default None (excluded when empty)
- Added logprobs field to ChatCompletionChoice (always included as null)
- Added stats and system_fingerprint to ChatCompletionResponse
- Fixed streaming response to use delta format (not message format)
- Fixed non-streaming response to include logprobs: null
- Updated tool instructions to include 'NO explanations'
- Added pytest-asyncio markers to async tests
- All 41 tests passing

This fixes the 'Cannot read properties of undefined (reading content)' error in hollama and ensures compatibility with OpenAI clients (response shapes sketched below).
2026-02-25 19:39:05 +01:00
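
Below is a minimal sketch of the response models this commit describes, assuming the project defines them with pydantic. The class and field names (ChatMessage, ChatCompletionChoice, ChatCompletionResponse, tool_calls, logprobs, stats, system_fingerprint) are taken from the commit message; the types, defaults, and everything else are illustrative, not the project's actual code.

from typing import Any, Optional
from pydantic import BaseModel

class ChatMessage(BaseModel):
    role: str
    content: Optional[str] = None
    # Per the commit: Optional with default None, and omitted from the
    # serialized payload when empty.
    tool_calls: Optional[list[dict]] = None

class ChatCompletionChoice(BaseModel):
    index: int
    message: ChatMessage
    # Per the commit: always present in the payload, serialized as null when
    # unset, so clients that read it unconditionally never see undefined.
    logprobs: Optional[Any] = None
    finish_reason: Optional[str] = None

class ChatCompletionResponse(BaseModel):
    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: list[ChatCompletionChoice]
    # Per the commit: added for OpenAI-client compatibility.
    system_fingerprint: Optional[str] = None
    stats: Optional[dict] = None

Streaming responses differ in that each chunk's choice carries a "delta" object (for example {"delta": {"content": "..."}}) rather than a full "message", which is the OpenAI streaming format the commit switches to.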

"""Test to verify tool execution is triggered when model generates tool calls."""
import asyncio
import sys
import os
import pytest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))
@pytest.mark.asyncio
async def test_tool_execution_triggered():
"""Verify that tool execution is properly triggered."""
from api.models import ChatMessage, ChatCompletionRequest
from api.chat_handlers import handle_chat_completion
from api.tool_parser import parse_tool_calls
from tools.executor import ToolExecutor, set_tool_executor
print("=" * 60)
print("Tool Execution Trigger Test")
print("=" * 60)
# Create a mock swarm that generates a tool call
class MockSwarm:
async def generate(self, prompt, max_tokens, temperature, use_consensus):
# First call: generate tool call
if "user" in prompt and "echo hello" in prompt:
return MockResult("TOOL: bash\nARGUMENTS: {\"command\": \"echo hello\"}")
# Second call: after tool result, generate answer
elif "tool" in prompt.lower():
return MockResult("Output: hello\nThe command executed successfully!")
else:
return MockResult("I don't understand")
class MockResult:
def __init__(self, text):
self.selected_response = MockSelectedResponse(text)
class MockSelectedResponse:
def __init__(self, text):
self.text = text
self.tokens_generated = 20
self.tokens_per_second = 5.0
# Set up tool executor
executor = ToolExecutor(tool_host_url=None)
set_tool_executor(executor)
# Create request
request = ChatCompletionRequest(
model="test-model",
messages=[ChatMessage(role="user", content="echo hello")],
tools=None, # No explicit tools - should still parse from response
max_tokens=1024,
temperature=0.7
)
print("\n1. Testing that tool calls are parsed...")
model_response = "TOOL: bash\nARGUMENTS: {\"command\": \"echo hello\"}"
content, tool_calls = parse_tool_calls(model_response)
assert tool_calls is not None, "Tool calls should be parsed from response"
assert len(tool_calls) == 1, "Should have one tool call"
print(f" ✓ Tool call parsed: {tool_calls[0]['function']['name']}")
print("\n2. Verifying tool executor is set...")
from tools.executor import get_tool_executor
current_executor = get_tool_executor()
assert current_executor is not None, "Tool executor should be set"
print(f" ✓ Tool executor configured: {current_executor.tool_host_url or 'local'}")
print("\n3. Testing tool execution...")
# Try to execute the tool
try:
from api.routes import execute_tool_server_side
result = await execute_tool_server_side(
"bash",
{"command": "echo hello"},
working_dir=None
)
print(f" ✓ Tool executed successfully")
print(f" ✓ Result: {result[:50]}..." if len(result) > 50 else f" ✓ Result: {result}")
except Exception as e:
print(f" ✗ Tool execution failed: {e}")
raise
print("\n" + "=" * 60)
print("All tool execution trigger tests passed!")
print("=" * 60)
if __name__ == "__main__":
try:
asyncio.run(test_tool_execution_triggered())
except AssertionError as e:
print(f"\n❌ Test failed: {e}")
import traceback
traceback.print_exc()
sys.exit(1)
except Exception as e:
print(f"\n❌ Test error: {e}")
import traceback
traceback.print_exc()
sys.exit(1)