"""Service for interacting with LLM APIs to generate test cases."""
import logging
import requests
import json
from dataclasses import dataclass
from typing import List, Dict, Any, Optional
from config import settings
from services.jira_service import JiraTicket
# Configure logging: basicConfig attaches a handler to the root logger at
# import time (it is a no-op if the application has already configured logging).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)  # module-scoped logger, per stdlib convention
@dataclass
class TestStep:
    """Represents a single step in a test case."""
    action: str  # the action the tester performs in this step
    expected_result: str  # the outcome expected after performing the action
@dataclass
class TestCase:
    """Represents a test case generated by the LLM."""
    title: str  # short, human-readable name of the test case
    description: str  # brief summary of what the test case verifies
    steps: List[TestStep]  # ordered steps, each pairing an action with its expected result
    ticket_key: str  # Reference to the original Jira ticket
class LLMService:
    """Service for generating test cases from Jira tickets via the Ask Sage LLM API."""

    def __init__(self):
        """Initialize the Ask Sage client configuration.

        Reads the API key from application settings and prepares the request
        headers used by every call.

        Raises:
            ConnectionError: If the service configuration cannot be set up.
        """
        try:
            # Ask Sage query endpoint; authentication uses the
            # "x-access-tokens" header rather than a Bearer token.
            self.api_url = "https://api.asksage.ai/server/query"
            self.api_key = settings.asksage_api_key
            self.headers = {
                "x-access-tokens": self.api_key,
                "Content-Type": "application/json"
            }
            logger.info("LLM service initialized successfully with Ask Sage API")
        except Exception as e:
            logger.error(f"Failed to initialize LLM service: {e}")
            raise ConnectionError(f"Could not set up LLM service: {e}")

    def generate_test_cases(self, ticket: JiraTicket, num_cases: int = 3) -> List[TestCase]:
        """
        Generate test cases for a given ticket using the LLM.

        Args:
            ticket: JiraTicket object containing context for test case generation
            num_cases: Number of test cases to generate

        Returns:
            List of TestCase objects.  This method never raises: on any
            failure it returns a single placeholder TestCase describing the
            error, so callers always receive a list.
        """
        try:
            prompt = self._create_prompt(ticket, num_cases)
            # Steer the model toward machine-parseable output.
            system_prompt = (
                "You are a QA engineer creating test cases based on ticket "
                "requirements. Format your response as JSON."
            )
            payload = {
                "message": prompt,
                "persona": 1,  # Using default persona
                "system_prompt": system_prompt
            }
            logger.info(f"Sending request to Ask Sage API for ticket {ticket.key}")
            # timeout=30 prevents a hung connection from blocking indefinitely.
            response = requests.post(self.api_url, headers=self.headers, json=payload, timeout=30)
            response.raise_for_status()
            # The generated text lives under the "response" key of the JSON body.
            result = response.json().get("response", "")
            logger.info(f"Received response from Ask Sage API for ticket {ticket.key}")
            test_cases = self._parse_llm_response(result, ticket.key)
            logger.info(f"Generated {len(test_cases)} test cases for ticket {ticket.key}")
            return test_cases
        except Exception as e:
            logger.error(f"Failed to generate test cases for ticket {ticket.key}: {e}")
            # Degrade gracefully: surface the failure as a single error test case.
            return [
                TestCase(
                    title=f"Error generating test case for {ticket.key}",
                    description=f"Failed to generate test case: {str(e)}",
                    steps=[TestStep(action="Contact developer", expected_result="Resolve the error")],
                    ticket_key=ticket.key
                )
            ]

    def _create_prompt(self, ticket: JiraTicket, num_cases: int) -> str:
        """
        Create a prompt for the LLM to generate test cases.

        Args:
            ticket: JiraTicket object
            num_cases: Number of test cases to generate

        Returns:
            Formatted prompt string embedding the ticket's key, title,
            description, acceptance criteria and design requirements, plus
            explicit JSON-formatting instructions.
        """
        return f"""
Please create {num_cases} test cases for the following Jira ticket:
TICKET KEY: {ticket.key}
TITLE: {ticket.title}
DESCRIPTION: {ticket.description}
ACCEPTANCE CRITERIA: {ticket.acceptance_criteria}
DESIGN REQUIREMENTS: {ticket.design_requirements}
For each test case, provide:
1. A clear title
2. A brief description
3. A list of test steps, each with an action and expected result
Format your response as a JSON array with the following structure:
[
{{
"title": "Test Case Title",
"description": "Test Case Description",
"steps": [
{{
"action": "Step action",
"expected_result": "Expected result"
}}
]
}}
]
Important: Your response MUST start with a JSON array using square brackets [ ] and contain at least one test case.
Do not include any explanatory text before or after the JSON.
Make sure the test cases are comprehensive and cover all acceptance criteria.
"""

    def _parse_llm_response(self, response: str, ticket_key: str) -> List[TestCase]:
        """
        Parse the LLM response into structured TestCase objects.

        The model is asked for a JSON array, but may instead emit a single
        JSON object, optionally prefixed with the word "json" (a quirk of
        Ask Sage output).  Both forms are handled.

        Args:
            response: Raw response from the LLM
            ticket_key: Key of the original ticket

        Returns:
            List of TestCase objects, or a single error TestCase when the
            response cannot be parsed (this method never raises).
        """
        try:
            logger.info(f"Raw response from Ask Sage: {response}")
            # Preferred form: a JSON array somewhere in the response.
            start_idx = response.find('[')
            end_idx = response.rfind(']') + 1
            if start_idx != -1 and end_idx > start_idx:
                json_str = response[start_idx:end_idx]
            else:
                # Fallback: a single JSON object.  Strip a leading "json"
                # marker if the model added one, then locate the object.
                # (Previously, a bare object without the "json" prefix left
                # json_str unassigned, raising UnboundLocalError here.)
                json_start = response.lower().find("json")
                if json_start >= 0:
                    response = response[json_start + 4:].strip()
                start_idx = response.find('{')
                end_idx = response.rfind('}') + 1
                if start_idx == -1 or end_idx == 0:
                    raise ValueError("Could not find JSON object or array in the LLM response")
                # Wrap the single object in an array for uniform processing.
                json_str = "[" + response[start_idx:end_idx] + "]"
            logger.info(f"Extracted JSON: {json_str}")
            test_cases_data = json.loads(json_str)
            test_cases = []
            for tc_data in test_cases_data:
                # Missing/partial fields default to empty values rather than failing.
                steps = [
                    TestStep(
                        action=step.get("action", ""),
                        expected_result=step.get("expected_result", "")
                    )
                    for step in tc_data.get("steps", [])
                ]
                test_cases.append(
                    TestCase(
                        title=tc_data.get("title", ""),
                        description=tc_data.get("description", ""),
                        steps=steps,
                        ticket_key=ticket_key
                    )
                )
            return test_cases
        except Exception as e:
            logger.error(f"Failed to parse LLM response: {e}")
            logger.error(f"Raw response that failed parsing: {response}")
            # Create a default test case indicating the error
            return [
                TestCase(
                    title=f"Error parsing test case for {ticket_key}",
                    description=f"Failed to parse LLM response: {str(e)}",
                    steps=[TestStep(action="Contact developer", expected_result="Resolve the error")],
                    ticket_key=ticket_key
                )
            ]