refactor: rename stock-lab → stock (graduation)

- git mv stock-lab/ → stock/
- docker-compose.yml: 서비스 키 + container_name + build.context +
  frontend.depends_on + agent-office STOCK_LAB_URL → STOCK_URL
- agent-office/app: config.py, service_proxy.py, agents/stock.py, tests/
  STOCK_LAB_URL → STOCK_URL
- nginx/default.conf: proxy_pass http://stock-lab → http://stock (3 lines)
- CLAUDE.md / README.md / STATUS.md / scripts/ 문구 갱신
- stock/ 내부 자기 참조 갱신

lab 네이밍 정책 (feedback_lab_naming.md) graduation.
API URL / Python import / DB 파일명 변경 없음.
This commit is contained in:
2026-05-15 01:45:22 +09:00
parent 8812bd870a
commit ace0339d33
74 changed files with 67 additions and 67 deletions

View File

@@ -0,0 +1,70 @@
import json
import pytest
from unittest.mock import AsyncMock, MagicMock
from app.screener.ai_news import analyzer
def _mk_llm(content_text: str, in_tokens: int = 100, out_tokens: int = 20):
llm = AsyncMock()
resp = MagicMock()
block = MagicMock()
block.text = content_text
resp.content = [block]
resp.usage = MagicMock(input_tokens=in_tokens, output_tokens=out_tokens)
llm.messages = MagicMock()
llm.messages.create = AsyncMock(return_value=resp)
return llm
# Shared fixture: two articles — one with a summary, one title-only (empty summary).
NEWS = [
    {"title": "삼성전자, HBM 양산", "summary": "1분기 영업이익 사상 최대", "pub_date": "2026-05-14"},
    {"title": "메모리 가격 반등", "summary": "", "pub_date": "2026-05-14"},
]
@pytest.mark.asyncio
async def test_score_sentiment_success_parses_json():
    """A well-formed JSON reply is parsed into the result dict."""
    llm = _mk_llm(json.dumps({"score": 7.5, "reason": "HBM 호재"}))

    result = await analyzer.score_sentiment(llm, "005930", NEWS, name="삼성전자")

    assert result["ticker"] == "005930"
    assert result["score_raw"] == 7.5
    assert result["reason"] == "HBM 호재"
    assert result["news_count"] == 2
    assert result["tokens_input"] == 100
    assert result["tokens_output"] == 20
@pytest.mark.asyncio
async def test_score_sentiment_json_parse_fail_returns_zero():
    """Unparseable LLM output degrades to a zero score instead of raising."""
    llm = _mk_llm("not valid json")
    result = await analyzer.score_sentiment(llm, "005930", NEWS)
    assert result["score_raw"] == 0.0
    assert "parse fail" in result["reason"]
    # The LLM call itself did happen, so token usage is still recorded.
    assert result["tokens_input"] == 100
@pytest.mark.asyncio
async def test_score_sentiment_clamps_out_of_range():
    """Scores above the scale are clamped to the +10 ceiling."""
    llm = _mk_llm(json.dumps({"score": 15.0, "reason": "초강세"}))
    result = await analyzer.score_sentiment(llm, "005930", NEWS)
    assert result["score_raw"] == 10.0
@pytest.mark.asyncio
async def test_score_sentiment_clamps_negative_out_of_range():
    """Scores below the scale are clamped to the -10 floor."""
    llm = _mk_llm(json.dumps({"score": -42.0, "reason": "초악재"}))
    result = await analyzer.score_sentiment(llm, "005930", NEWS)
    assert result["score_raw"] == -10.0
@pytest.mark.asyncio
async def test_score_sentiment_includes_summary_in_prompt():
    """The prompt carries the summary (when present), the title, and pub_date."""
    llm = _mk_llm(json.dumps({"score": 5.0, "reason": "ok"}))
    await analyzer.score_sentiment(llm, "005930", NEWS, name="삼성전자")

    create_call = llm.messages.create.call_args
    prompt = create_call.kwargs["messages"][0]["content"]
    assert "1분기 영업이익 사상 최대" in prompt  # summary included
    assert "삼성전자, HBM 양산" in prompt        # title included
    assert "2026-05-14" in prompt                # pub_date included

View File

@@ -0,0 +1,108 @@
import datetime as dt
import sqlite3
import pytest
from app.screener.ai_news import articles_source
from app.screener.schema import ensure_screener_schema
@pytest.fixture
def conn():
    """In-memory SQLite connection with the screener schema applied."""
    db = sqlite3.connect(":memory:")
    db.row_factory = sqlite3.Row
    ensure_screener_schema(db)
    yield db
    db.close()
def _seed_master(conn, ticker, name):
conn.execute(
"INSERT INTO krx_master (ticker, name, market, market_cap, updated_at) "
"VALUES (?, ?, 'KOSPI', 1000000000, datetime('now'))",
(ticker, name),
)
def _seed_article(conn, title, summary="", crawled_at="2026-05-14T07:30:00"):
import hashlib
h = hashlib.md5(f"{title}|x".encode()).hexdigest()
conn.execute(
"INSERT INTO articles (hash, title, summary, link, press, pub_date, crawled_at) "
"VALUES (?, ?, ?, '', '', '2026-05-14', ?)",
(h, title, summary, crawled_at),
)
# Reference "as of" date shared by every test below.
ASOF = dt.date(2026, 5, 14)
def test_single_ticker_match_in_title(conn):
    """An article whose title mentions the company maps to its ticker."""
    _seed_master(conn, "005930", "삼성전자")
    _seed_article(conn, "삼성전자, HBM 양산 가시화")
    conn.commit()

    by_ticker, stats = articles_source.gather_articles_for_tickers(
        conn, ["005930"], ASOF, window_days=1, max_per_ticker=5,
    )

    assert len(by_ticker["005930"]) == 1
    assert by_ticker["005930"][0]["title"] == "삼성전자, HBM 양산 가시화"
    assert stats["matched_pairs"] == 1
    assert stats["hit_tickers"] == 1
def test_single_ticker_match_in_summary(conn):
    """A company name appearing only in the summary still matches."""
    _seed_master(conn, "005930", "삼성전자")
    _seed_article(conn, "메모리 시장 회복세", summary="삼성전자가 1분기 어닝 서프라이즈")
    conn.commit()

    by_ticker, _ = articles_source.gather_articles_for_tickers(
        conn, ["005930"], ASOF, window_days=1, max_per_ticker=5,
    )
    assert len(by_ticker["005930"]) == 1
def test_multi_ticker_match(conn):
    """One article naming two companies is attributed to both tickers."""
    _seed_master(conn, "005930", "삼성전자")
    _seed_master(conn, "000660", "SK하이닉스")
    _seed_article(conn, "삼성전자와 SK하이닉스, 메모리 양산 경쟁")
    conn.commit()

    by_ticker, stats = articles_source.gather_articles_for_tickers(
        conn, ["005930", "000660"], ASOF, window_days=1, max_per_ticker=5,
    )

    assert len(by_ticker["005930"]) == 1
    assert len(by_ticker["000660"]) == 1
    assert stats["matched_pairs"] == 2
    assert stats["hit_tickers"] == 2
def test_no_match_returns_empty_list(conn):
    """Unrelated articles leave the ticker with an empty list, not a missing key."""
    _seed_master(conn, "005930", "삼성전자")
    _seed_article(conn, "엔비디아 실적 발표", summary="AI 칩 수요 견조")
    conn.commit()

    by_ticker, stats = articles_source.gather_articles_for_tickers(
        conn, ["005930"], ASOF, window_days=1, max_per_ticker=5,
    )

    assert by_ticker["005930"] == []
    assert stats["matched_pairs"] == 0
    assert stats["hit_tickers"] == 0
def test_max_per_ticker_caps_results(conn):
    """Six matching articles are truncated down to max_per_ticker=5."""
    _seed_master(conn, "005930", "삼성전자")
    for i in range(6):
        _seed_article(conn, f"삼성전자 뉴스 #{i}", crawled_at=f"2026-05-14T0{i}:00:00")
    conn.commit()

    by_ticker, _ = articles_source.gather_articles_for_tickers(
        conn, ["005930"], ASOF, window_days=1, max_per_ticker=5,
    )
    assert len(by_ticker["005930"]) == 5
def test_window_days_filters_old_articles(conn):
    """Articles crawled outside the window_days lookback are excluded."""
    _seed_master(conn, "005930", "삼성전자")
    _seed_article(conn, "삼성전자 최신 뉴스", crawled_at="2026-05-14T07:00:00")
    _seed_article(conn, "삼성전자 오래된 뉴스", crawled_at="2026-05-01T07:00:00")
    conn.commit()

    by_ticker, _ = articles_source.gather_articles_for_tickers(
        conn, ["005930"], ASOF, window_days=1, max_per_ticker=5,
    )

    assert len(by_ticker["005930"]) == 1
    assert "최신" in by_ticker["005930"][0]["title"]

View File

@@ -0,0 +1,57 @@
import datetime as dt
import pandas as pd
import pytest
from app.screener.nodes.ai_news import AiNewsSentiment
class FakeCtx:
    """Minimal stand-in for the screener context: only the two attributes
    that AiNewsSentiment.compute reads (news_sentiment frame and asof date)."""

    def __init__(self, df=None):
        self.news_sentiment = df
        self.asof = dt.date(2026, 5, 13)
def test_compute_empty_context():
    """With no sentiment frame in the context, compute returns an empty result."""
    result = AiNewsSentiment().compute(FakeCtx(None), {"min_news_count": 1})
    assert result.empty
def test_compute_with_data_percentile_ranks():
    """Percentile ranks preserve raw-score ordering and stay within [0, 100]."""
    frame = pd.DataFrame([
        {"ticker": "A", "score_raw": -5.0, "news_count": 3},
        {"ticker": "B", "score_raw": 0.0, "news_count": 3},
        {"ticker": "C", "score_raw": 8.0, "news_count": 3},
    ])

    ranks = AiNewsSentiment().compute(FakeCtx(frame), {"min_news_count": 1})

    assert len(ranks) == 3
    # Ordering follows the raw scores: A (lowest) < B < C (highest).
    assert ranks.loc["A"] < ranks.loc["B"] < ranks.loc["C"]
    # Percentile output is bounded.
    assert (ranks >= 0).all() and (ranks <= 100).all()
def test_compute_filters_by_min_news_count():
    """Rows below min_news_count are dropped before ranking."""
    frame = pd.DataFrame([
        {"ticker": "A", "score_raw": -5.0, "news_count": 0},  # filtered out
        {"ticker": "B", "score_raw": 0.0, "news_count": 2},
        {"ticker": "C", "score_raw": 8.0, "news_count": 5},
    ])

    result = AiNewsSentiment().compute(FakeCtx(frame), {"min_news_count": 1})

    assert "A" not in result.index
    assert "B" in result.index
    assert "C" in result.index
def test_compute_all_filtered_returns_empty():
    """If every row falls below min_news_count, the node returns empty."""
    frame = pd.DataFrame([
        {"ticker": "A", "score_raw": 5.0, "news_count": 0},
    ])
    result = AiNewsSentiment().compute(FakeCtx(frame), {"min_news_count": 1})
    assert result.empty
def test_metadata():
    """Node exposes its name, label, default params, and a param schema."""
    node = AiNewsSentiment()
    assert node.name == "ai_news"
    assert "AI" in node.label or "뉴스" in node.label
    assert node.default_params == {"min_news_count": 1}
    assert "min_news_count" in node.param_schema["properties"]

View File

@@ -0,0 +1,145 @@
import datetime as dt
import sqlite3
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from app.screener.ai_news import pipeline
from app.screener.schema import ensure_screener_schema
@pytest.fixture
def conn():
    """In-memory DB with the screener schema plus three top-market-cap tickers."""
    db = sqlite3.connect(":memory:")
    db.row_factory = sqlite3.Row
    ensure_screener_schema(db)
    # Seed the three largest tickers, descending by market cap.
    seed_rows = [
        ("005930", "삼성전자", 9_000_000),
        ("000660", "SK하이닉스", 8_000_000),
        ("373220", "LG에너지솔루션", 7_000_000),
    ]
    for ticker, name, cap in seed_rows:
        db.execute("INSERT INTO krx_master (ticker, name, market, market_cap, updated_at) "
                   "VALUES (?, ?, 'KOSPI', ?, datetime('now'))", (ticker, name, cap))
    db.commit()
    yield db
    db.close()
@pytest.mark.asyncio
async def test_refresh_daily_happy_path(conn):
    """Mini integration over 3 tickers — articles_source and analyzer mocked."""
    asof = dt.date(2026, 5, 13)
    # One matched article per seeded ticker.
    fake_articles_by_ticker = {
        "005930": [{"title": "삼성 뉴스", "summary": "", "press": "", "pub_date": ""}],
        "000660": [{"title": "SK 뉴스", "summary": "", "press": "", "pub_date": ""}],
        "373220": [{"title": "LG 뉴스", "summary": "", "press": "", "pub_date": ""}],
    }
    fake_stats = {"total_articles": 3, "matched_pairs": 3, "hit_tickers": 3}
    # 005930 should surface as top positive, 373220 as top negative.
    scores_by_ticker = {
        "005930": 7.5, "000660": 4.0, "373220": -6.0,
    }

    async def fake_score(llm, ticker, news, *, name=None, model="m"):
        # Deterministic analyzer stand-in returning the seeded score.
        return {
            "ticker": ticker, "score_raw": scores_by_ticker[ticker],
            "reason": f"r{ticker}", "news_count": 1,
            "tokens_input": 100, "tokens_output": 20, "model": model,
        }

    with patch.object(pipeline, "articles_source") as mas, \
         patch.object(pipeline, "_analyzer") as ma, \
         patch.object(pipeline, "_make_llm") as ml:
        mas.gather_articles_for_tickers = MagicMock(
            return_value=(fake_articles_by_ticker, fake_stats)
        )
        ma.score_sentiment = fake_score
        # _make_llm is consumed as an async context manager by the pipeline.
        ml.return_value.__aenter__.return_value = AsyncMock()
        ml.return_value.__aexit__.return_value = None
        result = await pipeline.refresh_daily(conn, asof, concurrency=3)

    assert result["asof"] == "2026-05-13"
    assert result["updated"] == 3
    assert result["failures"] == []
    assert result["top_pos"][0]["ticker"] == "005930"
    assert result["top_neg"][0]["ticker"] == "373220"
    assert result["mapping"] == fake_stats

    # Rows must be persisted with source='articles'.
    rows = conn.execute("SELECT ticker, score_raw, source FROM news_sentiment "
                        "WHERE date=?", ("2026-05-13",)).fetchall()
    assert len(rows) == 3
    assert all(r["source"] == "articles" for r in rows)
@pytest.mark.asyncio
async def test_refresh_daily_failures_isolated(conn):
    """One ticker's LLM failure must not abort scoring of the other tickers."""
    asof = dt.date(2026, 5, 13)
    fake_articles_by_ticker = {
        "005930": [{"title": "h", "summary": "", "press": "", "pub_date": ""}],
        "000660": [{"title": "h", "summary": "", "press": "", "pub_date": ""}],
        "373220": [{"title": "h", "summary": "", "press": "", "pub_date": ""}],
    }
    fake_stats = {"total_articles": 3, "matched_pairs": 3, "hit_tickers": 3}

    async def fake_score(llm, ticker, news, *, name=None, model="m"):
        # Simulate an LLM failure for exactly one ticker.
        if ticker == "000660":
            raise RuntimeError("llm exploded")
        return {
            "ticker": ticker, "score_raw": 5.0, "reason": "r", "news_count": 1,
            "tokens_input": 100, "tokens_output": 20, "model": model,
        }

    with patch.object(pipeline, "articles_source") as mas, \
         patch.object(pipeline, "_analyzer") as ma, \
         patch.object(pipeline, "_make_llm") as ml:
        mas.gather_articles_for_tickers = MagicMock(
            return_value=(fake_articles_by_ticker, fake_stats)
        )
        ma.score_sentiment = fake_score
        ml.return_value.__aenter__.return_value = AsyncMock()
        ml.return_value.__aexit__.return_value = None
        result = await pipeline.refresh_daily(conn, asof, concurrency=3)

    # Two tickers succeed; the exploding one is reported as a failure.
    assert result["updated"] == 2
    assert len(result["failures"]) == 1
@pytest.mark.asyncio
async def test_refresh_daily_no_match_ticker_skipped(conn):
    """Tickers with zero mapped articles skip the LLM call and write no row."""
    asof = dt.date(2026, 5, 13)
    fake_articles_by_ticker = {
        "005930": [{"title": "삼성", "summary": "", "press": "", "pub_date": ""}],
        "000660": [],  # no mapped articles
        "373220": [],  # no mapped articles
    }
    fake_stats = {"total_articles": 1, "matched_pairs": 1, "hit_tickers": 1}

    async def fake_score(llm, ticker, news, *, name=None, model="m"):
        return {
            "ticker": ticker, "score_raw": 5.0, "reason": "r",
            "news_count": 1, "tokens_input": 100, "tokens_output": 20,
            "model": model,
        }

    with patch.object(pipeline, "articles_source") as mas, \
         patch.object(pipeline, "_analyzer") as ma, \
         patch.object(pipeline, "_make_llm") as ml:
        mas.gather_articles_for_tickers = MagicMock(
            return_value=(fake_articles_by_ticker, fake_stats)
        )
        ma.score_sentiment = fake_score
        ml.return_value.__aenter__.return_value = AsyncMock()
        ml.return_value.__aexit__.return_value = None
        result = await pipeline.refresh_daily(conn, asof, concurrency=3)

    assert result["updated"] == 1
    # Only the matched ticker gets a news_sentiment row.
    rows = conn.execute("SELECT ticker FROM news_sentiment "
                        "WHERE date=?", ("2026-05-13",)).fetchall()
    assert {r["ticker"] for r in rows} == {"005930"}
def test_top_market_cap_tickers(conn):
    """Returns the n largest seeded tickers, ordered by market cap descending."""
    top_two = pipeline._top_market_cap_tickers(conn, n=2)
    assert top_two == ["005930", "000660"]

View File

@@ -0,0 +1,36 @@
import datetime as dt
from unittest.mock import AsyncMock, patch
from fastapi.testclient import TestClient
from app.main import app
def test_refresh_news_sentiment_weekend_skip():
    """2026-05-16 is a Saturday, so the endpoint must skip without running."""
    url = "/api/stock/screener/snapshot/refresh-news-sentiment?asof=2026-05-16"
    response = TestClient(app).post(url)
    assert response.status_code == 200
    body = response.json()
    assert body["status"] == "skipped_weekend"
def test_refresh_news_sentiment_weekday_invokes_pipeline():
    """On a weekday the router runs the pipeline and builds a telegram text."""
    fake_summary = {
        "asof": "2026-05-13", "updated": 3, "failures": [],
        "duration_sec": 1.0, "tokens_input": 100, "tokens_output": 20,
        "top_pos": [], "top_neg": [], "model": "m",
        "mapping": {"total_articles": 5, "matched_pairs": 8, "hit_tickers": 3},
    }
    url = "/api/stock/screener/snapshot/refresh-news-sentiment?asof=2026-05-13"

    with patch("app.screener.router._ai_pipeline") as mock_pipeline, \
         patch("app.screener.router._ai_telegram") as mock_telegram:
        mock_pipeline.refresh_daily = AsyncMock(return_value=fake_summary)
        # Telegram builder stub echoes the mapping it was handed.
        mock_telegram.build_message = lambda **kw: f"TEXT_with_mapping={kw.get('mapping')}"
        response = TestClient(app).post(url)

    assert response.status_code == 200
    payload = response.json()
    assert payload["mapping"]["hit_tickers"] == 3
    assert "mapping=" in payload["telegram_text"]

View File

@@ -0,0 +1,55 @@
import pytest
from unittest.mock import AsyncMock
from app.screener.ai_news import scraper
# Naver-finance-style news table with three rows (two dated 05-13, one 05-12).
SAMPLE_HTML = """
<html><body>
<table class="type5"><tbody>
<tr><td class="title"><a href="/news1">삼성전자, HBM 양산 가시화</a></td><td class="date">2026.05.13 07:30</td></tr>
<tr><td class="title"><a href="/news2">삼성, 4분기 어닝 쇼크 우려</a></td><td class="date">2026.05.13 06:00</td></tr>
<tr><td class="title"><a href="/news3">메모리 시장 회복세</a></td><td class="date">2026.05.12 18:00</td></tr>
</tbody></table>
</body></html>
"""
# Same table structure with no rows at all.
EMPTY_HTML = "<html><body><table class='type5'><tbody></tbody></table></body></html>"
def _mk_client(status_code=200, text=SAMPLE_HTML):
    """Fake httpx-style async client whose get() resolves to a canned response."""
    response = AsyncMock()
    response.status_code = status_code
    response.text = text

    http_client = AsyncMock()
    http_client.get = AsyncMock(return_value=response)
    return http_client
@pytest.mark.asyncio
async def test_fetch_news_success_returns_n_items():
    """Happy path: title/date cells are parsed and n items come back."""
    client = _mk_client()
    items = await scraper.fetch_news(client, "005930", n=2)
    assert len(items) == 2
    assert items[0]["title"] == "삼성전자, HBM 양산 가시화"
    assert items[0]["date"] == "2026.05.13 07:30"
@pytest.mark.asyncio
async def test_fetch_news_404_returns_empty():
    """A non-200 response yields an empty list instead of raising."""
    client = _mk_client(status_code=404, text="")
    items = await scraper.fetch_news(client, "999999", n=5)
    assert items == []
@pytest.mark.asyncio
async def test_fetch_news_empty_table_returns_empty():
    """A news table with no rows yields an empty list."""
    client = _mk_client(text=EMPTY_HTML)
    items = await scraper.fetch_news(client, "005930", n=5)
    assert items == []
@pytest.mark.asyncio
async def test_fetch_news_n_caps_results():
    """The sample markup has 3 rows, but n=2 caps the output length."""
    client = _mk_client()
    items = await scraper.fetch_news(client, "005930", n=2)
    assert len(items) == 2

View File

@@ -0,0 +1,79 @@
from app.screener.ai_news import telegram as tg
def _row(ticker, score, reason="r"):
return {"ticker": ticker, "score_raw": score, "reason": reason,
"news_count": 5, "tokens_input": 100, "tokens_output": 20,
"model": "m"}
def test_build_message_includes_top_sections():
    """Message carries the header, both Top sections, tickers and reasons."""
    msg = tg.build_message(
        asof="2026-05-13",
        top_pos=[_row("005930", 8.5, "HBM 호재")],
        top_neg=[_row("373220", -6.3, "수주 지연")],
        tokens_input=10000, tokens_output=2000,
    )

    for fragment in ("AI 뉴스 분석", "호재 Top", "악재 Top", "005930", "HBM", "373220"):
        assert fragment in msg
    # Scores pass through MarkdownV2 escaping, so '.' is rendered as '\.'.
    assert "8\\.5" in msg
def test_build_message_escapes_markdownv2_specials():
    """Parentheses and brackets in reasons must be MarkdownV2-escaped."""
    msg = tg.build_message(
        asof="2026-05-13",
        top_pos=[_row("005930", 3.0, "테스트(괄호) [대괄호]")],
        top_neg=[],
        tokens_input=100, tokens_output=20,
    )
    assert r"\(" in msg or r"\)" in msg
    assert r"\[" in msg or r"\]" in msg
def test_build_message_cost_won_line():
    """The message includes a cost line in won.

    tokens_input × 0.0013 + tokens_output × 0.0065 = 13 + 13 = ₩26.
    """
    msg = tg.build_message(
        asof="2026-05-13", top_pos=[], top_neg=[],
        tokens_input=10000, tokens_output=2000,
    )
    # BUG FIX: the original assertion ended with `or "" in msg`, which is
    # always True ("" is a substring of every string), so the test could
    # never fail. Dropped the vacuous clause so the cost line is actually
    # verified.
    assert "₩26" in msg or "₩ 26" in msg
def test_build_message_empty_lists():
    """Even with no rows at all, both section headers must be present."""
    msg = tg.build_message(
        asof="2026-05-13", top_pos=[], top_neg=[],
        tokens_input=0, tokens_output=0,
    )
    assert "호재 Top" in msg
    assert "악재 Top" in msg
def test_build_message_includes_mapping_line():
    """When mapping stats are supplied, the 매핑 line carries all three counts."""
    msg = tg.build_message(
        asof="2026-05-14",
        top_pos=[_row("005930", 8.5, "HBM 호재")],
        top_neg=[],
        tokens_input=1000, tokens_output=200,
        mapping={"total_articles": 35, "matched_pairs": 50, "hit_tickers": 42},
    )
    assert "매핑" in msg
    for count in ("42", "50", "35"):
        assert count in msg
def test_build_message_without_mapping_omits_line():
    """Without a mapping argument, no 매핑 line appears in the message."""
    msg = tg.build_message(
        asof="2026-05-14",
        top_pos=[],
        top_neg=[],
        tokens_input=1000, tokens_output=200,
    )
    assert "매핑" not in msg

View File

@@ -0,0 +1,120 @@
"""Tests for ai_news validation harness (Spearman IC)."""
import datetime as dt
import sqlite3
import pytest
from app.screener.ai_news import validation
from app.screener.schema import ensure_screener_schema
@pytest.fixture
def conn():
    """In-memory SQLite connection with the screener schema applied."""
    db = sqlite3.connect(":memory:")
    db.row_factory = sqlite3.Row
    ensure_screener_schema(db)
    yield db
    db.close()
def _seed_sentiment(conn, date, ticker, score, news_count=3):
conn.execute(
"INSERT INTO news_sentiment (ticker, date, score_raw, reason, news_count, "
"tokens_input, tokens_output, model) "
"VALUES (?, ?, ?, 'r', ?, 100, 20, 'm')",
(ticker, date, score, news_count),
)
def _seed_price(conn, ticker, date, close):
conn.execute(
"INSERT INTO krx_daily_prices (ticker, date, close) VALUES (?, ?, ?)",
(ticker, date, close),
)
def test_empty_db_returns_skip(conn):
    """With no data at all, compute_ic reports zero pairs and a 'skip' verdict."""
    out = validation.compute_ic(conn, days=30, horizon=1, asof_today=dt.date(2026, 5, 14))
    assert out["ic_count"] == 0
    assert out["verdict"] == "skip"
    assert out["ic_mean"] is None
def test_strong_positive_ic(conn):
    """5 tickers × 12 days with a monotonic score→return link → IC ≈ +1.

    Each ticker keeps a constant score; its close moves by that score per day
    (day 0 close = 100, day n close = 100 + score × n), so next-day returns
    rank exactly like the scores.
    """
    start = dt.date(2026, 5, 1)
    for idx, ticker in enumerate(["A", "B", "C", "D", "E"]):
        fixed_score = idx * 2.0 - 4.0  # per-ticker constant: -4, -2, 0, +2, +4
        # 13 days of prices (day 0..12); sentiment for the first 12 days.
        for offset in range(13):
            day = (start + dt.timedelta(days=offset)).isoformat()
            _seed_price(conn, ticker, day, 100.0 + fixed_score * offset)
            if offset < 12:
                _seed_sentiment(conn, day, ticker, fixed_score)
    conn.commit()

    out = validation.compute_ic(conn, days=30, horizon=1, asof_today=dt.date(2026, 5, 14))

    assert out["ic_count"] >= 10
    assert out["ic_mean"] > 0.5
    assert out["verdict"] == "strong"
def test_zero_ic_random_data(conn):
    """Scores unrelated to returns → IC near zero."""
    import random
    random.seed(42)
    start = dt.date(2026, 5, 1)
    for ticker in ["A", "B", "C", "D", "E", "F", "G"]:
        for offset in range(13):
            day = (start + dt.timedelta(days=offset)).isoformat()
            # NOTE: the order of random.uniform calls matters under the fixed
            # seed — price first, then sentiment, exactly once per iteration.
            _seed_price(conn, ticker, day, 100.0 + random.uniform(-5, 5))
            if offset < 12:
                _seed_sentiment(conn, day, ticker, random.uniform(-10, 10))
    conn.commit()

    out = validation.compute_ic(conn, days=30, horizon=1, asof_today=dt.date(2026, 5, 14))

    assert out["ic_count"] >= 10
    assert abs(out["ic_mean"]) < 0.3              # only a weak signal expected
    assert out["verdict"] in ("weak", "strong")   # may wobble with the seed
def test_min_news_count_filter(conn):
    """Rows with news_count below min_news_count are excluded from the IC."""
    _seed_sentiment(conn, "2026-05-13", "A", 5.0, news_count=0)
    _seed_sentiment(conn, "2026-05-13", "B", -5.0, news_count=3)
    for ticker, close_13, close_14 in (("A", 100.0, 105.0), ("B", 100.0, 95.0)):
        _seed_price(conn, ticker, "2026-05-13", close_13)
        _seed_price(conn, ticker, "2026-05-14", close_14)
    conn.commit()

    out = validation.compute_ic(
        conn, days=30, horizon=1, min_news_count=1,
        asof_today=dt.date(2026, 5, 14),
    )
    # A is filtered out; one remaining ticker (< 5) cannot yield Spearman → skip.
    assert out["ic_count"] == 0
def test_horizon_5_days(conn):
    """With horizon=5 the return is close[date+5] / close[date] - 1."""
    start = dt.date(2026, 5, 1)
    tickers = ["A", "B", "C", "D", "E"]

    # Sentiment for 20 days with a fixed per-ticker score (-4 .. +4).
    for offset in range(20):
        day = (start + dt.timedelta(days=offset)).isoformat()
        for idx, ticker in enumerate(tickers):
            _seed_sentiment(conn, day, ticker, idx * 2.0 - 4.0)

    # Prices for 25 days with a linear per-ticker slope of (idx - 2),
    # i.e. -2 .. +2 per day.
    for offset in range(25):
        day = (start + dt.timedelta(days=offset)).isoformat()
        for idx, ticker in enumerate(tickers):
            slope = idx - 2
            _seed_price(conn, ticker, day, 100.0 + slope * offset)
    conn.commit()

    out = validation.compute_ic(conn, days=30, horizon=5, asof_today=dt.date(2026, 5, 25))

    assert out["horizon_days"] == 5
    assert out["ic_count"] > 0