# NOTE(review): the original SOURCE is a git format-patch adding two new files.
# The payload is reproduced below as clean Python; file boundaries are marked.

# --- agent-office/app/curator/schema.py ---
from typing import List, Literal

from pydantic import BaseModel, Field, field_validator


class Pick(BaseModel):
    """One recommended number set with a risk label and a short rationale."""

    # Exactly six numbers; uniqueness/range are enforced by the validator below.
    numbers: List[int] = Field(min_length=6, max_length=6)
    # Closed vocabulary of risk labels (Korean: stable / balanced / aggressive).
    risk_tag: Literal["안정", "균형", "공격"]
    # Short human-readable justification, capped to keep LLM output terse.
    reason: str = Field(max_length=80)

    @field_validator("numbers")
    @classmethod
    def _check_numbers(cls, v):
        """Require 6 unique integers in 1..45 and normalize to sorted order."""
        if len(set(v)) != 6:
            raise ValueError("numbers must be 6 unique integers")
        if any(n < 1 or n > 45 for n in v):
            raise ValueError("numbers must be within 1..45")
        return sorted(v)


class Narrative(BaseModel):
    """Free-text commentary accompanying the picks."""

    headline: str
    # Exactly three summary lines.
    summary_3lines: List[str] = Field(min_length=3, max_length=3)
    hot_cold_comment: str = ""
    warnings: str = ""


class CuratorOutput(BaseModel):
    """Top-level curator response: picks, narrative, and a 0..100 confidence."""

    picks: List[Pick]
    narrative: Narrative
    confidence: int = Field(ge=0, le=100)


def validate_response(
    data: dict,
    candidate_numbers: List[List[int]],
    expected_picks: int = 5,
) -> CuratorOutput:
    """Validate a raw curator payload against the schema and the candidate pool.

    Args:
        data: Raw (already JSON-decoded) LLM response.
        candidate_numbers: Allowed number sets; every pick must be one of them.
        expected_picks: Required number of picks. Defaults to 5, the value
            that was previously hard-coded, so existing callers are unaffected.

    Returns:
        The validated ``CuratorOutput``.

    Raises:
        ValueError: On schema violations (pydantic's ``ValidationError`` is a
            ``ValueError`` subclass), a wrong pick count, or a pick that is
            not drawn from ``candidate_numbers``.
    """
    out = CuratorOutput.model_validate(data)
    if len(out.picks) != expected_picks:
        raise ValueError(f"picks must have exactly {expected_picks} sets")
    # Pick.numbers is normalized to sorted order by its field validator, so
    # comparing sorted tuples is an exact membership test.
    candidate_set = {tuple(sorted(c)) for c in candidate_numbers}
    for p in out.picks:
        if tuple(p.numbers) not in candidate_set:
            raise ValueError(f"pick {p.numbers} not in candidates")
    return out


# --- agent-office/tests/test_curator_schema.py (continues on the next block) ---
import pytest
from app.curator.schema import validate_response, CuratorOutput


CANDIDATE_NUMBERS = [
    [1, 2, 3, 4, 5, 6],
    [7, 8, 9, 10, 11, 12],
    [13, 14, 15, 16, 17, 18],
    [19, 20, 21, 22, 23, 24],
    [25, 26, 27, 28, 29, 30],
    [31, 32, 33, 34, 35, 36],
]


def _valid_payload():
    """Build a payload that satisfies every schema and candidate constraint."""
    return {
        "picks": [
            {"numbers": s, "risk_tag": "안정", "reason": "test"}
            for s in CANDIDATE_NUMBERS[:5]
        ],
        "narrative": {
            "headline": "h", "summary_3lines": ["a", "b", "c"],
            "hot_cold_comment": "hc", "warnings": "",
        },
        "confidence": 80,
    }


def test_valid_payload_passes():
    """A fully valid payload round-trips into a CuratorOutput with 5 picks."""
    result = validate_response(_valid_payload(), CANDIDATE_NUMBERS)
    assert isinstance(result, CuratorOutput)
    assert len(result.picks) == 5


def test_rejects_number_out_of_candidates():
    """Schema-valid numbers that are not in the candidate pool must fail."""
    bad = _valid_payload()
    bad["picks"][0]["numbers"] = [40, 41, 42, 43, 44, 45]  # valid numbers but not in candidates
    with pytest.raises(ValueError, match="not in candidates"):
        validate_response(bad, CANDIDATE_NUMBERS)


def test_rejects_wrong_pick_count():
    """Fewer than the required 5 picks must be rejected."""
    bad = _valid_payload()
    bad["picks"] = bad["picks"][:3]
    with pytest.raises(ValueError, match="exactly 5"):
        validate_response(bad, CANDIDATE_NUMBERS)


def test_rejects_duplicate_numbers_within_set():
    """Repeated numbers inside one pick violate the uniqueness validator."""
    bad = _valid_payload()
    bad["picks"][0]["numbers"] = [1, 1, 2, 3, 4, 5]
    with pytest.raises(ValueError):
        validate_response(bad, CANDIDATE_NUMBERS)
def test_rejects_invalid_risk_tag():
    """A risk_tag outside the allowed Literal values must be rejected."""
    payload = _valid_payload()
    payload["picks"][0]["risk_tag"] = "미친"  # not one of 안정/균형/공격
    with pytest.raises(ValueError):
        validate_response(payload, CANDIDATE_NUMBERS)