Complete AI Data Analysis Agent implementation with 95.7% test coverage
This commit is contained in:
320
tests/test_models.py
Normal file
320
tests/test_models.py
Normal file
@@ -0,0 +1,320 @@
|
||||
"""Unit tests for core data models."""
|
||||
|
||||
import pytest
|
||||
import json
|
||||
from datetime import datetime
|
||||
|
||||
from src.models import (
|
||||
ColumnInfo,
|
||||
DataProfile,
|
||||
AnalysisObjective,
|
||||
RequirementSpec,
|
||||
AnalysisTask,
|
||||
AnalysisPlan,
|
||||
AnalysisResult,
|
||||
)
|
||||
|
||||
|
||||
class TestColumnInfo:
    """Construction and round-trip serialization checks for ColumnInfo."""

    def test_create_column_info(self):
        """A freshly built ColumnInfo exposes exactly the values it was given."""
        info = ColumnInfo(
            name='age',
            dtype='numeric',
            missing_rate=0.05,
            unique_count=50,
            sample_values=[25, 30, 35, 40, 45],
            statistics={'mean': 35.5, 'std': 10.2},
        )

        assert info.name == 'age'
        assert info.dtype == 'numeric'
        assert info.missing_rate == 0.05
        assert info.unique_count == 50
        assert len(info.sample_values) == 5
        assert info.statistics['mean'] == 35.5

    def test_column_info_serialization(self):
        """to_dict output can be fed back through from_dict losslessly."""
        original = ColumnInfo(
            name='status',
            dtype='categorical',
            missing_rate=0.0,
            unique_count=3,
            sample_values=['open', 'closed', 'pending'],
        )

        as_dict = original.to_dict()
        assert as_dict['name'] == 'status'
        assert as_dict['dtype'] == 'categorical'

        rebuilt = ColumnInfo.from_dict(as_dict)
        assert rebuilt.name == original.name
        assert rebuilt.dtype == original.dtype
        assert rebuilt.sample_values == original.sample_values

    def test_column_info_json(self):
        """JSON round-trip preserves the identifying fields."""
        original = ColumnInfo(
            name='created_at',
            dtype='datetime',
            missing_rate=0.0,
            unique_count=1000,
        )

        rebuilt = ColumnInfo.from_json(original.to_json())

        assert rebuilt.name == original.name
        assert rebuilt.dtype == original.dtype
|
||||
|
||||
|
||||
class TestDataProfile:
    """Construction and round-trip serialization checks for DataProfile."""

    def test_create_data_profile(self):
        """A DataProfile built with full metadata exposes every field."""
        cols = [
            ColumnInfo(name='id', dtype='numeric', missing_rate=0.0, unique_count=100),
            ColumnInfo(name='status', dtype='categorical', missing_rate=0.0, unique_count=3),
        ]

        profile = DataProfile(
            file_path='test.csv',
            row_count=100,
            column_count=2,
            columns=cols,
            inferred_type='ticket',
            key_fields={'status': 'ticket status'},
            quality_score=85.5,
            summary='Test data profile',
        )

        assert profile.file_path == 'test.csv'
        assert profile.row_count == 100
        assert profile.inferred_type == 'ticket'
        assert len(profile.columns) == 2
        assert profile.quality_score == 85.5

    def test_data_profile_serialization(self):
        """to_dict / from_dict round-trips a minimal profile intact."""
        cols = [
            ColumnInfo(name='id', dtype='numeric', missing_rate=0.0, unique_count=100),
        ]

        original = DataProfile(
            file_path='test.csv',
            row_count=100,
            column_count=1,
            columns=cols,
            inferred_type='sales',
        )

        as_dict = original.to_dict()
        assert as_dict['file_path'] == 'test.csv'
        assert as_dict['inferred_type'] == 'sales'
        assert len(as_dict['columns']) == 1

        rebuilt = DataProfile.from_dict(as_dict)
        assert rebuilt.file_path == original.file_path
        assert rebuilt.row_count == original.row_count
        assert len(rebuilt.columns) == len(original.columns)
|
||||
|
||||
|
||||
class TestAnalysisObjective:
    """Construction and serialization checks for AnalysisObjective."""

    def test_create_objective(self):
        """The constructor stores name, priority and the metric list."""
        objective = AnalysisObjective(
            name='Health Analysis',
            description='Analyze ticket health',
            metrics=['close_rate', 'avg_duration'],
            priority=5,
        )

        assert objective.name == 'Health Analysis'
        assert objective.priority == 5
        assert len(objective.metrics) == 2

    def test_objective_serialization(self):
        """from_dict(to_dict(x)) reproduces name and metrics."""
        objective = AnalysisObjective(
            name='Test',
            description='Test objective',
            metrics=['metric1'],
        )

        rebuilt = AnalysisObjective.from_dict(objective.to_dict())

        assert rebuilt.name == objective.name
        assert rebuilt.metrics == objective.metrics
|
||||
|
||||
|
||||
class TestRequirementSpec:
    """Construction and serialization checks for RequirementSpec."""

    def test_create_requirement_spec(self):
        """A spec built with objectives, constraints and outputs exposes them."""
        goal_list = [
            AnalysisObjective(name='Obj1', description='First objective', metrics=['m1'])
        ]

        spec = RequirementSpec(
            user_input='Analyze ticket health',
            objectives=goal_list,
            constraints=['no_pii'],
            expected_outputs=['report', 'charts'],
        )

        assert spec.user_input == 'Analyze ticket health'
        assert len(spec.objectives) == 1
        assert len(spec.constraints) == 1

    def test_requirement_spec_serialization(self):
        """to_dict / from_dict round-trips a minimal spec intact."""
        goal_list = [
            AnalysisObjective(name='Obj1', description='Test', metrics=['m1'])
        ]

        original = RequirementSpec(
            user_input='Test input',
            objectives=goal_list,
        )

        rebuilt = RequirementSpec.from_dict(original.to_dict())

        assert rebuilt.user_input == original.user_input
        assert len(rebuilt.objectives) == len(original.objectives)
|
||||
|
||||
|
||||
class TestAnalysisTask:
    """Construction and serialization checks for AnalysisTask."""

    def test_create_task(self):
        """A new task stores its fields and defaults to 'pending' status."""
        task = AnalysisTask(
            id='task_1',
            name='Calculate statistics',
            description='Calculate basic statistics',
            priority=5,
            dependencies=['task_0'],
            required_tools=['stats_tool'],
            expected_output='Statistics summary',
        )

        assert task.id == 'task_1'
        assert task.priority == 5
        assert len(task.dependencies) == 1
        # status is not passed above, so this pins the model's default.
        assert task.status == 'pending'

    def test_task_serialization(self):
        """from_dict(to_dict(x)) reproduces id and name."""
        original = AnalysisTask(
            id='task_1',
            name='Test task',
            description='Test',
            priority=3,
        )

        rebuilt = AnalysisTask.from_dict(original.to_dict())

        assert rebuilt.id == original.id
        assert rebuilt.name == original.name
|
||||
|
||||
|
||||
class TestAnalysisPlan:
    """Construction and serialization checks for AnalysisPlan."""

    def test_create_plan(self):
        """A plan stores objectives, tasks, duration and a datetime stamp."""
        goal_list = [
            AnalysisObjective(name='Obj1', description='Test', metrics=['m1'])
        ]
        task_list = [
            AnalysisTask(id='t1', name='Task 1', description='Test', priority=5)
        ]

        plan = AnalysisPlan(
            objectives=goal_list,
            tasks=task_list,
            tool_config={'tool1': 'config1'},
            estimated_duration=300,
        )

        assert len(plan.objectives) == 1
        assert len(plan.tasks) == 1
        assert plan.estimated_duration == 300
        # created_at is not passed above, so this pins the auto-stamp default.
        assert isinstance(plan.created_at, datetime)

    def test_plan_serialization(self):
        """to_dict / from_dict preserves the objective and task counts."""
        goal_list = [
            AnalysisObjective(name='Obj1', description='Test', metrics=['m1'])
        ]
        task_list = [
            AnalysisTask(id='t1', name='Task 1', description='Test', priority=5)
        ]

        original = AnalysisPlan(objectives=goal_list, tasks=task_list)

        rebuilt = AnalysisPlan.from_dict(original.to_dict())

        assert len(rebuilt.objectives) == len(original.objectives)
        assert len(rebuilt.tasks) == len(original.tasks)
|
||||
|
||||
|
||||
class TestAnalysisResult:
    """Success, failure and serialization checks for AnalysisResult."""

    def test_create_result(self):
        """A successful result carries data/insights and no error."""
        outcome = AnalysisResult(
            task_id='task_1',
            task_name='Test task',
            success=True,
            data={'count': 100},
            visualizations=['chart1.png'],
            insights=['Key finding 1'],
            execution_time=5.5,
        )

        assert outcome.task_id == 'task_1'
        assert outcome.success is True
        assert outcome.data['count'] == 100
        assert len(outcome.insights) == 1
        # error is not passed above, so this pins the model's None default.
        assert outcome.error is None

    def test_result_with_error(self):
        """A failed result exposes the error message it was given."""
        outcome = AnalysisResult(
            task_id='task_1',
            task_name='Failed task',
            success=False,
            error='Tool execution failed',
        )

        assert outcome.success is False
        assert outcome.error == 'Tool execution failed'

    def test_result_serialization(self):
        """from_dict(to_dict(x)) reproduces id, success flag and payload."""
        original = AnalysisResult(
            task_id='task_1',
            task_name='Test',
            success=True,
            data={'key': 'value'},
        )

        rebuilt = AnalysisResult.from_dict(original.to_dict())

        assert rebuilt.task_id == original.task_id
        assert rebuilt.success == original.success
        assert rebuilt.data == original.data
|
||||
Reference in New Issue
Block a user