refactor: rename stock-lab → stock (graduation)
- git mv stock-lab/ → stock/ - docker-compose.yml: 서비스 키 + container_name + build.context + frontend.depends_on + agent-office STOCK_LAB_URL → STOCK_URL - agent-office/app: config.py, service_proxy.py, agents/stock.py, tests/ STOCK_LAB_URL → STOCK_URL - nginx/default.conf: proxy_pass http://stock-lab → http://stock (3 lines) - CLAUDE.md / README.md / STATUS.md / scripts/ 문구 갱신 - stock/ 내부 자기 참조 갱신 lab 네이밍 정책 (feedback_lab_naming.md) graduation. API URL / Python import / DB 파일명 변경 없음.
This commit is contained in:
184
stock/app/ai_summarizer.py
Normal file
184
stock/app/ai_summarizer.py
Normal file
@@ -0,0 +1,184 @@
|
||||
"""LLM 기반 뉴스 요약 모듈.
|
||||
|
||||
LLM_PROVIDER 환경변수로 provider 전환:
|
||||
- claude (기본): Anthropic Messages API (claude-haiku-4-5)
|
||||
- ollama: Windows AI 서버의 Ollama (qwen3:14b 등)
|
||||
|
||||
`summarize_news(articles)` 시그니처는 provider와 무관하게 동일하며,
|
||||
실패 시 `LLMError`(구 `OllamaError` alias)를 raise 한다.
|
||||
"""
|
||||
import os
|
||||
import logging
|
||||
import time
|
||||
from typing import List, Dict, Any
|
||||
|
||||
import httpx
|
||||
|
||||
logger = logging.getLogger("stock.ai_summarizer")
|
||||
|
||||
LLM_PROVIDER = os.getenv("LLM_PROVIDER", "claude").lower().strip()
|
||||
|
||||
# Ollama
|
||||
OLLAMA_URL = os.getenv("OLLAMA_URL", "http://192.168.45.59:11435")
|
||||
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "qwen3:14b")
|
||||
|
||||
# Anthropic (Claude)
|
||||
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "")
|
||||
ANTHROPIC_MODEL = os.getenv("ANTHROPIC_MODEL", "claude-haiku-4-5-20251001")
|
||||
ANTHROPIC_URL = "https://api.anthropic.com/v1/messages"
|
||||
ANTHROPIC_VERSION = "2023-06-01"
|
||||
|
||||
_PROMPT_TEMPLATE = """당신은 한국 주식 시장 애널리스트입니다. 아래 뉴스 목록을 읽고 투자자 관점에서 한국어로 간결하게 요약하세요.
|
||||
|
||||
반드시 아래 형식을 그대로 지켜서 출력하세요. 다른 설명이나 서두, `<think>` 같은 태그는 절대 출력하지 마세요.
|
||||
|
||||
📌 시장 흐름
|
||||
(2줄 요약)
|
||||
|
||||
🔥 주목 이슈
|
||||
• (이슈 1)
|
||||
• (이슈 2)
|
||||
• (이슈 3)
|
||||
|
||||
💡 투자 관점
|
||||
(1줄 인사이트)
|
||||
|
||||
=== 뉴스 목록 ===
|
||||
{news_block}
|
||||
"""
|
||||
|
||||
|
||||
class LLMError(RuntimeError):
    """Raised when an LLM provider call fails (connection, HTTP or parse error)."""


# Backward-compatibility alias (keeps existing imports in main.py etc. working)
OllamaError = LLMError
|
||||
|
||||
|
||||
def _build_news_block(articles: List[Dict[str, Any]]) -> str:
    """Render the articles as a numbered text block for the prompt.

    Uses `content` when present (falling back to `summary`); returns the
    literal placeholder string when the list is empty.
    """
    if not articles:
        return "(뉴스 없음)"
    rendered = []
    for idx, article in enumerate(articles, start=1):
        headline = (article.get("title") or "").strip()
        body = (article.get("content") or article.get("summary") or "").strip()
        entry = f"{idx}. {headline} — {body}" if body else f"{idx}. {headline}"
        rendered.append(entry)
    return "\n".join(rendered)
|
||||
|
||||
|
||||
async def _summarize_with_ollama(prompt: str) -> Dict[str, Any]:
    """Call the Ollama /api/generate endpoint (non-streaming).

    Returns {"summary", "tokens", "model", "duration_ms"}.
    Raises LLMError on connection, HTTP-status or JSON-parse failures.
    """
    url = f"{OLLAMA_URL.rstrip('/')}/api/generate"
    payload = {"model": OLLAMA_MODEL, "prompt": prompt, "stream": False}

    started = time.monotonic()
    try:
        # Long timeout: a local model can take minutes on a large prompt.
        async with httpx.AsyncClient(timeout=180.0) as client:
            resp = await client.post(url, json=payload)
    except httpx.HTTPError as e:
        err_type = type(e).__name__
        err_msg = str(e) or "(no message)"
        logger.error(f"Ollama 연결 실패 ({url}): [{err_type}] {err_msg}")
        raise LLMError(f"Ollama 연결 실패: [{err_type}] {err_msg}") from e

    if resp.status_code != 200:
        logger.error(f"Ollama 응답 오류 {resp.status_code}: {resp.text[:200]}")
        raise LLMError(f"Ollama HTTP {resp.status_code}: {resp.text[:200]}")

    try:
        data = resp.json()
    except ValueError as e:
        raise LLMError(f"Ollama 응답 JSON 파싱 실패: {e}") from e

    summary = (data.get("response") or "").strip()
    prompt_tokens = int(data.get("prompt_eval_count") or 0)
    completion_tokens = int(data.get("eval_count") or 0)
    # Prefer Ollama's own wall-clock measurement (nanoseconds); fall back
    # to the local timer when the field is absent or zero.
    total_duration_ns = int(data.get("total_duration") or 0)
    if total_duration_ns > 0:
        duration_ms = total_duration_ns // 1_000_000
    else:
        duration_ms = int((time.monotonic() - started) * 1000)

    return {
        "summary": summary,
        "tokens": {
            "prompt": prompt_tokens,
            "completion": completion_tokens,
            "total": prompt_tokens + completion_tokens,
        },
        "model": data.get("model") or OLLAMA_MODEL,
        "duration_ms": duration_ms,
    }
|
||||
|
||||
|
||||
async def _summarize_with_claude(prompt: str) -> Dict[str, Any]:
    """Call the Anthropic Messages API and normalize its response.

    Returns the same shape as the Ollama path:
    {"summary", "tokens", "model", "duration_ms"}.
    Raises LLMError when the key is missing or the call fails.
    """
    if not ANTHROPIC_API_KEY:
        raise LLMError("ANTHROPIC_API_KEY 미설정 — Claude provider 사용 불가")

    request_headers = {
        "x-api-key": ANTHROPIC_API_KEY,
        "anthropic-version": ANTHROPIC_VERSION,
        "content-type": "application/json",
    }
    request_body = {
        "model": ANTHROPIC_MODEL,
        "max_tokens": 1024,
        "messages": [{"role": "user", "content": prompt}],
    }

    t0 = time.monotonic()
    try:
        async with httpx.AsyncClient(timeout=60.0) as client:
            response = await client.post(ANTHROPIC_URL, headers=request_headers, json=request_body)
    except httpx.HTTPError as exc:
        kind = type(exc).__name__
        detail = str(exc) or "(no message)"
        logger.error(f"Anthropic 연결 실패: [{kind}] {detail}")
        raise LLMError(f"Anthropic 연결 실패: [{kind}] {detail}") from exc

    if response.status_code != 200:
        logger.error(f"Anthropic 응답 오류 {response.status_code}: {response.text[:300]}")
        raise LLMError(f"Anthropic HTTP {response.status_code}: {response.text[:200]}")

    try:
        data = response.json()
    except ValueError as exc:
        raise LLMError(f"Anthropic 응답 JSON 파싱 실패: {exc}") from exc

    # Response content is a list of blocks: [{"type": "text", "text": "..."}]
    text_parts = [b.get("text", "") for b in (data.get("content") or []) if b.get("type") == "text"]
    summary = "".join(text_parts).strip()

    usage = data.get("usage") or {}
    input_tokens = int(usage.get("input_tokens") or 0)
    output_tokens = int(usage.get("output_tokens") or 0)

    return {
        "summary": summary,
        "tokens": {
            "prompt": input_tokens,
            "completion": output_tokens,
            "total": input_tokens + output_tokens,
        },
        "model": data.get("model") or ANTHROPIC_MODEL,
        "duration_ms": int((time.monotonic() - t0) * 1000),
    }
|
||||
|
||||
|
||||
async def summarize_news(articles: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Summarize a list of news articles with the configured LLM provider.

    Returns:
        {"summary": str, "tokens": {...}, "model": str, "duration_ms": int}
    Raises:
        LLMError: on provider failure or an unknown LLM_PROVIDER value.
    """
    prompt = _PROMPT_TEMPLATE.format(news_block=_build_news_block(articles))

    if LLM_PROVIDER == "ollama":
        return await _summarize_with_ollama(prompt)
    if LLM_PROVIDER == "claude":
        return await _summarize_with_claude(prompt)
    raise LLMError(f"지원하지 않는 LLM_PROVIDER: {LLM_PROVIDER}")
|
||||
299
stock/app/db.py
Normal file
299
stock/app/db.py
Normal file
@@ -0,0 +1,299 @@
|
||||
import sqlite3
|
||||
import os
|
||||
import hashlib
|
||||
from typing import List, Dict, Any, Optional
|
||||
|
||||
from app.screener.schema import ensure_screener_schema
|
||||
|
||||
DB_PATH = os.environ.get("STOCK_DB_PATH", "/app/data/stock.db")


def _conn() -> sqlite3.Connection:
    """Open a SQLite connection with WAL mode and a long busy timeout.

    STOCK_DB_PATH is re-read on every call (falling back to the import-time
    DB_PATH) so the database location can be overridden at runtime.
    """
    db_path = os.environ.get("STOCK_DB_PATH", DB_PATH)
    directory = os.path.dirname(db_path)
    if directory:
        os.makedirs(directory, exist_ok=True)
    connection = sqlite3.connect(db_path, timeout=120.0)
    connection.row_factory = sqlite3.Row
    # WAL permits concurrent readers during writes; busy_timeout (120s)
    # matches the connect timeout to ride out long write locks.
    connection.execute("PRAGMA journal_mode=WAL")
    connection.execute("PRAGMA busy_timeout=120000")
    return connection
|
||||
|
||||
def init_db():
    """Create all tables and apply in-place schema migrations.

    Idempotent: CREATE TABLE IF NOT EXISTS plus PRAGMA table_info checks
    for columns that were added after the initial release.
    """
    with _conn() as conn:
        conn.execute("""
            CREATE TABLE IF NOT EXISTS articles (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                hash TEXT UNIQUE NOT NULL,
                category TEXT DEFAULT 'domestic',
                title TEXT NOT NULL,
                link TEXT,
                summary TEXT,
                press TEXT,
                pub_date TEXT,
                crawled_at TEXT
            )
        """)
        conn.execute("CREATE INDEX IF NOT EXISTS idx_articles_crawled ON articles(crawled_at DESC)")

        # Column migration for tables created before `category` existed
        cols = {r["name"] for r in conn.execute("PRAGMA table_info(articles)").fetchall()}
        if "category" not in cols:
            conn.execute("ALTER TABLE articles ADD COLUMN category TEXT DEFAULT 'domestic'")

        conn.execute("""
            CREATE TABLE IF NOT EXISTS portfolio (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                broker TEXT NOT NULL,
                ticker TEXT NOT NULL,
                name TEXT NOT NULL,
                quantity INTEGER NOT NULL,
                avg_price INTEGER NOT NULL,
                purchase_price INTEGER,
                created_at TEXT DEFAULT (datetime('now','localtime')),
                updated_at TEXT DEFAULT (datetime('now','localtime'))
            )
        """)

        # Migration: add purchase_price to older DBs, backfilled from avg_price
        _pf_cols = {r["name"] for r in conn.execute("PRAGMA table_info(portfolio)").fetchall()}
        if "purchase_price" not in _pf_cols:
            conn.execute("ALTER TABLE portfolio ADD COLUMN purchase_price INTEGER")
            conn.execute("UPDATE portfolio SET purchase_price = avg_price WHERE purchase_price IS NULL")

        conn.execute("""
            CREATE TABLE IF NOT EXISTS broker_cash (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                broker TEXT UNIQUE NOT NULL,
                cash INTEGER NOT NULL DEFAULT 0,
                updated_at TEXT DEFAULT (datetime('now','localtime'))
            )
        """)

        conn.execute("""
            CREATE TABLE IF NOT EXISTS asset_snapshots (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                date TEXT UNIQUE NOT NULL,
                total_eval INTEGER NOT NULL,
                total_cash INTEGER NOT NULL,
                total_assets INTEGER NOT NULL,
                created_at TEXT DEFAULT (datetime('now','localtime'))
            )
        """)

        conn.execute("""
            CREATE TABLE IF NOT EXISTS sell_history (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                broker TEXT NOT NULL,
                ticker TEXT NOT NULL,
                name TEXT NOT NULL,
                quantity INTEGER NOT NULL,
                avg_price REAL NOT NULL,
                sell_price REAL NOT NULL,
                commission REAL NOT NULL DEFAULT 0,
                buy_amount REAL NOT NULL,
                sell_amount REAL NOT NULL,
                realized_profit REAL NOT NULL,
                realized_rate REAL NOT NULL,
                sold_at TEXT NOT NULL
            )
        """)

        # sell_history migration: add the commission column when missing
        sh_cols = {r["name"] for r in conn.execute("PRAGMA table_info(sell_history)").fetchall()}
        if "commission" not in sh_cols:
            conn.execute("ALTER TABLE sell_history ADD COLUMN commission REAL NOT NULL DEFAULT 0")

        # Screener schema bootstrap (7 tables + default settings seed)
        ensure_screener_schema(conn)
|
||||
|
||||
def save_articles(articles: List[Dict[str, str]]) -> int:
    """Insert scraped articles, skipping duplicates via a title+link hash.

    Returns the number of newly inserted rows.
    """
    inserted = 0
    with _conn() as conn:
        for article in articles:
            # Dedup key: md5 over "title|link" (matches the UNIQUE hash column)
            digest = hashlib.md5(f"{article['title']}|{article['link']}".encode()).hexdigest()

            try:
                conn.execute(
                    """
                    INSERT INTO articles (hash, category, title, link, summary, press, pub_date, crawled_at)
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                    """,
                    (
                        digest,
                        article.get("category", "domestic"),
                        article['title'],
                        article['link'],
                        article['summary'],
                        article['press'],
                        article['date'],
                        article['crawled_at'],
                    ),
                )
                inserted += 1
            except sqlite3.IntegrityError:
                pass  # duplicate — already stored
    return inserted
|
||||
|
||||
def get_latest_articles(limit: int = 20, category: Optional[str] = None) -> List[Dict[str, Any]]:
    """Return the newest articles, optionally filtered by category.

    Args:
        limit: maximum number of rows to return.
        category: 'domestic' / 'overseas' filter; None (or empty) returns all.
    """
    # Fix: `category` default is None, so annotate Optional[str];
    # build the query once instead of duplicating the SELECT.
    query = "SELECT * FROM articles"
    params: list = []
    if category:
        query += " WHERE category = ?"
        params.append(category)
    query += " ORDER BY crawled_at DESC, id DESC LIMIT ?"
    params.append(limit)
    with _conn() as conn:
        rows = conn.execute(query, params).fetchall()
    return [dict(r) for r in rows]
|
||||
|
||||
|
||||
# --- Portfolio CRUD ---
|
||||
|
||||
def add_portfolio_item(
    broker: str, ticker: str, name: str, quantity: int, avg_price: int,
    purchase_price: Optional[int] = None,
) -> int:
    """Insert a holding and return its new row id.

    purchase_price defaults to avg_price when omitted (backward compatibility
    with callers that predate the purchase_price column).
    """
    # Default purchase_price to avg_price when not supplied (backward compat)
    if purchase_price is None:
        purchase_price = avg_price
    with _conn() as conn:
        cur = conn.execute(
            "INSERT INTO portfolio (broker, ticker, name, quantity, avg_price, purchase_price) VALUES (?, ?, ?, ?, ?, ?)",
            (broker, ticker, name, quantity, avg_price, purchase_price),
        )
        return cur.lastrowid
|
||||
|
||||
|
||||
def get_all_portfolio() -> List[Dict[str, Any]]:
    """Return every portfolio row ordered by insertion id."""
    with _conn() as conn:
        cursor = conn.execute("SELECT * FROM portfolio ORDER BY id")
        return [dict(row) for row in cursor.fetchall()]
|
||||
|
||||
|
||||
def get_portfolio_item(item_id: int) -> Dict[str, Any] | None:
    """Fetch a single holding by id, or None when it does not exist."""
    with _conn() as conn:
        found = conn.execute("SELECT * FROM portfolio WHERE id = ?", (item_id,)).fetchone()
    if found is None:
        return None
    return dict(found)
|
||||
|
||||
|
||||
def update_portfolio_item(item_id: int, **kwargs) -> bool:
    """Update whitelisted columns of a holding; True when a row changed.

    None values are dropped so partial updates can pass a full model dump.
    """
    # Fix: replace the opaque __import__("datetime") hack with a normal
    # (function-local) import — the module keeps no top-level datetime import.
    from datetime import datetime

    allowed = {"broker", "ticker", "name", "quantity", "avg_price", "purchase_price"}
    fields = {k: v for k, v in kwargs.items() if k in allowed and v is not None}
    if not fields:
        return False
    fields["updated_at"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # Safe to interpolate: column names come from the allowlist above,
    # values are bound as parameters.
    set_clause = ", ".join(f"{k} = ?" for k in fields)
    values = list(fields.values()) + [item_id]
    with _conn() as conn:
        cur = conn.execute(f"UPDATE portfolio SET {set_clause} WHERE id = ?", values)
        return cur.rowcount > 0
|
||||
|
||||
|
||||
def delete_portfolio_item(item_id: int) -> bool:
    """Delete a holding; True when the row existed."""
    with _conn() as conn:
        result = conn.execute("DELETE FROM portfolio WHERE id = ?", (item_id,))
        removed = result.rowcount > 0
    return removed
|
||||
|
||||
|
||||
# --- Broker Cash CRUD ---
|
||||
|
||||
def upsert_broker_cash(broker: str, cash: int) -> None:
    """Insert or update the cash balance for a broker (keyed on broker name)."""
    # Fix: replace the opaque __import__("datetime") hack with a normal import.
    from datetime import datetime

    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    with _conn() as conn:
        conn.execute("""
            INSERT INTO broker_cash (broker, cash, updated_at)
            VALUES (?, ?, ?)
            ON CONFLICT(broker) DO UPDATE SET cash = excluded.cash, updated_at = excluded.updated_at
        """, (broker, cash, now))
|
||||
|
||||
|
||||
def get_all_broker_cash() -> List[Dict[str, Any]]:
    """Return per-broker cash balances sorted by broker name."""
    with _conn() as conn:
        cursor = conn.execute("SELECT * FROM broker_cash ORDER BY broker")
        return [dict(row) for row in cursor.fetchall()]
|
||||
|
||||
|
||||
def delete_broker_cash(broker: str) -> bool:
    """Remove a broker's cash row; True when it existed."""
    with _conn() as conn:
        result = conn.execute("DELETE FROM broker_cash WHERE broker = ?", (broker,))
        removed = result.rowcount > 0
    return removed
|
||||
|
||||
|
||||
# --- Asset Snapshot CRUD ---
|
||||
|
||||
def upsert_asset_snapshot(date: str, total_eval: int, total_cash: int, total_assets: int) -> None:
    """Insert or overwrite the daily asset snapshot keyed on `date` (YYYY-MM-DD)."""
    # Fix: replace the opaque __import__("datetime") hack with a normal import.
    from datetime import datetime

    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    with _conn() as conn:
        conn.execute("""
            INSERT INTO asset_snapshots (date, total_eval, total_cash, total_assets, created_at)
            VALUES (?, ?, ?, ?, ?)
            ON CONFLICT(date) DO UPDATE SET
                total_eval = excluded.total_eval,
                total_cash = excluded.total_cash,
                total_assets = excluded.total_assets,
                created_at = excluded.created_at
        """, (date, total_eval, total_cash, total_assets, now))
|
||||
|
||||
|
||||
# --- Sell History CRUD ---
|
||||
|
||||
def add_sell_history(data: Dict[str, Any]) -> Dict[str, Any]:
    """Insert a realized-sale record and return the stored row.

    All keys except "commission" (defaults to 0) are required and raise
    KeyError when missing.
    """
    with _conn() as conn:
        cur = conn.execute("""
            INSERT INTO sell_history
            (broker, ticker, name, quantity, avg_price, sell_price,
             commission, buy_amount, sell_amount, realized_profit, realized_rate, sold_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        """, (
            data["broker"], data["ticker"], data["name"], data["quantity"],
            data["avg_price"], data["sell_price"], data.get("commission", 0),
            data["buy_amount"], data["sell_amount"], data["realized_profit"],
            data["realized_rate"], data["sold_at"],
        ))
        row = conn.execute("SELECT * FROM sell_history WHERE id = ?", (cur.lastrowid,)).fetchone()
        return dict(row)
|
||||
|
||||
|
||||
def get_sell_history(broker: Optional[str] = None, days: Optional[int] = None) -> List[Dict[str, Any]]:
    """List sell records, newest first, optionally filtered.

    Args:
        broker: restrict to one broker when truthy.
        days: restrict to the trailing N days when truthy.
    """
    conditions = []
    params = []
    if broker:
        conditions.append("broker = ?")
        params.append(broker)
    if days:
        # sold_at >= now - N days (SQLite datetime modifier, e.g. '-30 days')
        conditions.append("sold_at >= datetime('now', ? || ' days')")
        params.append(f"-{days}")
    where = f"WHERE {' AND '.join(conditions)}" if conditions else ""
    with _conn() as conn:
        rows = conn.execute(
            f"SELECT * FROM sell_history {where} ORDER BY sold_at DESC",
            params,
        ).fetchall()
    return [dict(r) for r in rows]
|
||||
|
||||
|
||||
def update_sell_history(record_id: int, data: Dict[str, Any]) -> Dict[str, Any] | None:
    """Overwrite every column of a sell record; return the updated row or None.

    All keys except "commission" (defaults to 0) are required in `data`
    and raise KeyError when missing.
    """
    fields = ["broker", "ticker", "name", "quantity", "avg_price", "sell_price",
              "commission", "buy_amount", "sell_amount", "realized_profit", "realized_rate", "sold_at"]
    # Column names come from the fixed list above, so the f-string is safe.
    set_clause = ", ".join(f"{f} = ?" for f in fields)
    values = [data.get(f, 0) if f == "commission" else data[f] for f in fields] + [record_id]
    with _conn() as conn:
        cur = conn.execute(f"UPDATE sell_history SET {set_clause} WHERE id = ?", values)
        if cur.rowcount == 0:
            return None
        row = conn.execute("SELECT * FROM sell_history WHERE id = ?", (record_id,)).fetchone()
        return dict(row)
|
||||
|
||||
|
||||
def delete_sell_history(record_id: int) -> bool:
    """Delete a sell record; True when the row existed."""
    with _conn() as conn:
        result = conn.execute("DELETE FROM sell_history WHERE id = ?", (record_id,))
        removed = result.rowcount > 0
    return removed
|
||||
|
||||
|
||||
def get_asset_snapshots(days: int = 30) -> List[Dict[str, Any]]:
    """Return daily asset snapshots in ascending date order.

    days=0 returns the full history; otherwise the most recent `days` rows.
    """
    with _conn() as conn:
        if days == 0:
            rows = conn.execute(
                "SELECT date, total_eval, total_cash, total_assets FROM asset_snapshots ORDER BY date ASC"
            ).fetchall()
        else:
            rows = conn.execute(
                "SELECT date, total_eval, total_cash, total_assets FROM asset_snapshots ORDER BY date DESC LIMIT ?",
                (days,)
            ).fetchall()
            # LIMIT grabs the newest rows; flip back to chronological order
            rows = list(reversed(rows))
    return [dict(r) for r in rows]
|
||||
18
stock/app/holidays.json
Normal file
18
stock/app/holidays.json
Normal file
@@ -0,0 +1,18 @@
|
||||
[
|
||||
"2026-01-01",
|
||||
"2026-01-28",
|
||||
"2026-01-29",
|
||||
"2026-01-30",
|
||||
"2026-03-01",
|
||||
"2026-05-05",
|
||||
"2026-05-25",
|
||||
"2026-06-06",
|
||||
"2026-08-15",
|
||||
"2026-09-24",
|
||||
"2026-09-25",
|
||||
"2026-09-26",
|
||||
"2026-10-03",
|
||||
"2026-10-09",
|
||||
"2026-12-25",
|
||||
"2026-12-31"
|
||||
]
|
||||
535
stock/app/main.py
Normal file
535
stock/app/main.py
Normal file
@@ -0,0 +1,535 @@
|
||||
import os
|
||||
import json
|
||||
import logging
|
||||
from datetime import date as date_type
|
||||
from typing import Optional
|
||||
from fastapi import FastAPI, Query, Header, Depends, HTTPException
|
||||
from fastapi.responses import JSONResponse
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
import requests
|
||||
from apscheduler.schedulers.background import BackgroundScheduler
|
||||
from pydantic import BaseModel
|
||||
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(name)s] %(levelname)s %(message)s")
|
||||
logger = logging.getLogger("stock")
|
||||
|
||||
from .db import (
|
||||
init_db, save_articles, get_latest_articles,
|
||||
add_portfolio_item, get_all_portfolio, get_portfolio_item,
|
||||
update_portfolio_item, delete_portfolio_item,
|
||||
upsert_broker_cash, get_all_broker_cash, delete_broker_cash,
|
||||
upsert_asset_snapshot, get_asset_snapshots,
|
||||
add_sell_history, get_sell_history, update_sell_history, delete_sell_history,
|
||||
)
|
||||
from .scraper import fetch_market_news, fetch_major_indices
|
||||
from .price_fetcher import get_current_prices, get_current_prices_detail
|
||||
from .ai_summarizer import summarize_news, OllamaError
|
||||
|
||||
app = FastAPI()

# Register the screener router
from app.screener.router import router as screener_router
app.include_router(screener_router)

# CORS (allow the frontend origins; comma-separated env override)
_cors_origins = os.getenv("CORS_ALLOW_ORIGINS", "http://localhost:3007,http://localhost:8080").split(",")
app.add_middleware(
    CORSMiddleware,
    allow_origins=[o.strip() for o in _cors_origins],
    allow_credentials=False,
    allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
    allow_headers=["Content-Type"],
)

scheduler = BackgroundScheduler(timezone=os.getenv("TZ", "Asia/Seoul"))

# Windows AI server URL (configured via the NAS .env)
WINDOWS_AI_SERVER_URL = os.getenv("WINDOWS_AI_SERVER_URL", "http://192.168.0.5:8000")

# Admin API key authentication (empty = auth disabled, see verify_admin)
ADMIN_API_KEY = os.getenv("ADMIN_API_KEY", "")
|
||||
|
||||
def verify_admin(x_admin_key: str = Header(None)):
    """API-key check protecting the admin/trade endpoints (X-Admin-Key header)."""
    if not ADMIN_API_KEY:
        return  # auth disabled when no key is configured (development setup)
    if x_admin_key != ADMIN_API_KEY:
        raise HTTPException(status_code=401, detail="Unauthorized")
|
||||
|
||||
# Anthropic API key for the server-side proxy endpoint (kept off the client)
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "")

# Market holiday dates (YYYY-MM-DD strings) bundled next to this module
_HOLIDAYS_PATH = os.path.join(os.path.dirname(__file__), "holidays.json")
try:
    with open(_HOLIDAYS_PATH, "r") as f:
        _HOLIDAYS: set = set(json.load(f))
except Exception:
    # Missing/corrupt file -> no holidays; weekends are still excluded below.
    _HOLIDAYS = set()
|
||||
|
||||
def is_market_open(d: date_type) -> bool:
    """True when `d` is a weekday and not a configured market holiday."""
    if d.weekday() >= 5:  # Saturday/Sunday
        return False
    return d.strftime("%Y-%m-%d") not in _HOLIDAYS
|
||||
|
||||
|
||||
def _calc_portfolio_totals(items, prices):
    """Total purchase cost and current valuation for the holdings.

    Shared by the snapshot job and the portfolio API; a holding whose ticker
    has no price entry is valued at its own avg_price.
    """
    total_buy = 0
    total_eval = 0
    for holding in items:
        qty = holding["quantity"]
        total_buy += holding["avg_price"] * qty
        total_eval += prices.get(holding["ticker"], holding["avg_price"]) * qty
    return total_buy, total_eval
|
||||
|
||||
|
||||
def save_daily_snapshot():
    """Persist today's total-asset snapshot; skipped on weekends/holidays."""
    today = date_type.today()
    if not is_market_open(today):
        logger.info(f"Snapshot: {today} 휴장일 — 스킵")
        return

    today_str = today.strftime("%Y-%m-%d")
    items = get_all_portfolio()
    cash_rows = get_all_broker_cash()
    total_cash = sum(r["cash"] for r in cash_rows)

    if items:
        # De-duplicate tickers before the price lookup
        tickers = list({item["ticker"] for item in items})
        prices = get_current_prices(tickers)
        _, total_eval = _calc_portfolio_totals(items, prices)
    else:
        total_eval = 0

    total_assets = total_eval + total_cash
    upsert_asset_snapshot(today_str, total_eval, total_cash, total_assets)
    logger.info(f"Snapshot: {today_str} 저장 — eval={total_eval}, cash={total_cash}, total={total_assets}")
|
||||
|
||||
@app.on_event("startup")
def on_startup():
    """Initialize the DB, register cron jobs, and backfill news when empty."""
    init_db()

    # Scrape news every morning at 08:00 (performed by the NAS itself)
    scheduler.add_job(run_scraping_job, "cron", hour="8", minute="0")

    # Save the total-asset snapshot on weekdays at 15:40
    scheduler.add_job(save_daily_snapshot, "cron", day_of_week="mon-fri", hour=15, minute=40)

    # Also run once at startup when there is no data yet
    if not get_latest_articles(1):
        run_scraping_job()

    scheduler.start()
|
||||
|
||||
def run_scraping_job():
    """Scrape domestic market news and store any new articles."""
    logger.info("뉴스 스크래핑 시작")

    articles_kr = fetch_market_news()
    count_kr = save_articles(articles_kr)

    logger.info(f"스크래핑 완료: 국내 {count_kr}건")
|
||||
|
||||
@app.get("/health")
|
||||
def health():
|
||||
return {"ok": True}
|
||||
|
||||
@app.get("/api/stock/news")
|
||||
def get_news(limit: int = 20, category: str = None):
|
||||
"""최신 주식 뉴스 조회 (category: 'domestic' | 'overseas')"""
|
||||
return get_latest_articles(limit, category)
|
||||
|
||||
@app.get("/api/stock/indices")
|
||||
def get_indices():
|
||||
"""주요 지표(KOSPI 등) 실시간 크롤링 조회"""
|
||||
return fetch_major_indices()
|
||||
|
||||
@app.post("/api/stock/scrap")
|
||||
def trigger_scrap():
|
||||
"""수동 스크랩 트리거"""
|
||||
run_scraping_job()
|
||||
return {"ok": True}
|
||||
|
||||
|
||||
class NewsSummarizeRequest(BaseModel):
    # Number of latest articles to summarize (default 10)
    limit: Optional[int] = 10
|
||||
|
||||
|
||||
@app.post("/api/stock/news/summarize")
|
||||
async def summarize_latest_news(req: NewsSummarizeRequest = NewsSummarizeRequest()):
|
||||
"""최근 뉴스를 Ollama(qwen3:14b)로 요약"""
|
||||
limit = req.limit if (req and req.limit) else 10
|
||||
articles = get_latest_articles(limit)
|
||||
if not articles:
|
||||
raise HTTPException(status_code=404, detail="요약할 뉴스가 없습니다.")
|
||||
|
||||
try:
|
||||
result = await summarize_news(articles)
|
||||
except OllamaError as e:
|
||||
logger.error(f"뉴스 요약 실패: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Ollama 호출 실패: {e}")
|
||||
except Exception as e:
|
||||
logger.exception("뉴스 요약 중 예상치 못한 오류")
|
||||
raise HTTPException(status_code=500, detail=f"뉴스 요약 실패: {e}")
|
||||
|
||||
# 상위 기사 메타 일부만 노출 (클라이언트가 본문 조립에 사용)
|
||||
top_articles = [
|
||||
{
|
||||
"title": (a.get("title") or "").strip(),
|
||||
"link": a.get("link") or "",
|
||||
"press": a.get("press") or "",
|
||||
"pub_date": a.get("pub_date") or "",
|
||||
}
|
||||
for a in articles[:8]
|
||||
]
|
||||
return {
|
||||
**result,
|
||||
"article_count": len(articles),
|
||||
"articles": top_articles,
|
||||
}
|
||||
|
||||
# --- Trading API (Windows Proxy, 인증 필요) ---
|
||||
|
||||
@app.get("/api/trade/balance", dependencies=[Depends(verify_admin)])
|
||||
def get_balance():
|
||||
"""계좌 잔고 조회 (Windows AI Server Proxy)"""
|
||||
logger.info(f"Requesting Balance from {WINDOWS_AI_SERVER_URL}")
|
||||
resp = None
|
||||
try:
|
||||
resp = requests.get(f"{WINDOWS_AI_SERVER_URL}/trade/balance", timeout=5)
|
||||
if resp.status_code != 200:
|
||||
logger.error(f"Balance Error: {resp.status_code}")
|
||||
return JSONResponse(status_code=resp.status_code, content=resp.json())
|
||||
return resp.json()
|
||||
except ValueError:
|
||||
status = resp.status_code if resp is not None else 502
|
||||
return JSONResponse(status_code=status, content={"error": f"Upstream error {status}"})
|
||||
except Exception as e:
|
||||
logger.error(f"Balance Connection Failed: {e}")
|
||||
return JSONResponse(status_code=500, content={"error": "Connection Failed"})
|
||||
|
||||
class OrderRequest(BaseModel):
    """Buy/sell order payload forwarded to the Windows AI server."""
    ticker: str
    action: str  # presumably "buy" | "sell" — confirm against the Windows server API
    quantity: int
    price: int = 0  # NOTE(review): 0 looks like a market-order sentinel — verify upstream
    reason: Optional[str] = "Manual Order"
|
||||
|
||||
@app.post("/api/trade/order", dependencies=[Depends(verify_admin)])
|
||||
def order_stock(req: OrderRequest):
|
||||
"""주식 매수/매도 주문 (Windows AI Server Proxy)"""
|
||||
logger.info(f"Order Request: {req.action} {req.ticker} x{req.quantity}")
|
||||
resp = None
|
||||
try:
|
||||
resp = requests.post(f"{WINDOWS_AI_SERVER_URL}/trade/order", json=req.model_dump(), timeout=10)
|
||||
if resp.status_code != 200:
|
||||
logger.error(f"Order Error: {resp.status_code}")
|
||||
return JSONResponse(status_code=resp.status_code, content=resp.json())
|
||||
return resp.json()
|
||||
except ValueError:
|
||||
status = resp.status_code if resp is not None else 502
|
||||
return JSONResponse(status_code=status, content={"error": f"Upstream error {status}"})
|
||||
except Exception as e:
|
||||
logger.error(f"Order Connection Failed: {e}")
|
||||
return JSONResponse(status_code=500, content={"error": "Connection Failed"})
|
||||
|
||||
# --- AI Coach 프록시 (API 키를 서버에 보관) ---
|
||||
|
||||
class AiCoachRequest(BaseModel):
    """Prompt payload for the AI-coach Anthropic proxy endpoint."""
    model: str = "claude-haiku-4-5-20251001"
    prompt: str
    max_tokens: int = 1024
|
||||
|
||||
@app.post("/api/stock/ai-coach")
|
||||
def ai_coach(req: AiCoachRequest):
|
||||
"""AI 포트폴리오 코치 — Anthropic API 프록시 (API 키 서버 보관)"""
|
||||
if not ANTHROPIC_API_KEY:
|
||||
raise HTTPException(503, "AI Coach not configured (ANTHROPIC_API_KEY missing)")
|
||||
|
||||
allowed_models = {"claude-haiku-4-5-20251001", "claude-sonnet-4-6"}
|
||||
model = req.model if req.model in allowed_models else "claude-haiku-4-5-20251001"
|
||||
|
||||
try:
|
||||
resp = requests.post(
|
||||
"https://api.anthropic.com/v1/messages",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"x-api-key": ANTHROPIC_API_KEY,
|
||||
"anthropic-version": "2023-06-01",
|
||||
},
|
||||
json={
|
||||
"model": model,
|
||||
"max_tokens": req.max_tokens,
|
||||
"messages": [{"role": "user", "content": req.prompt}],
|
||||
},
|
||||
timeout=30,
|
||||
)
|
||||
if resp.status_code != 200:
|
||||
logger.error(f"Anthropic API error: {resp.status_code}")
|
||||
return JSONResponse(status_code=resp.status_code, content={"error": "AI API error"})
|
||||
return resp.json()
|
||||
except requests.Timeout:
|
||||
return JSONResponse(status_code=504, content={"error": "AI API timeout"})
|
||||
except Exception as e:
|
||||
logger.error(f"AI Coach error: {e}")
|
||||
return JSONResponse(status_code=500, content={"error": "AI Coach failed"})
|
||||
|
||||
|
||||
|
||||
|
||||
@app.get("/api/version")
|
||||
def version():
|
||||
return {"version": os.getenv("APP_VERSION", "dev")}
|
||||
|
||||
|
||||
# --- Portfolio API ---
|
||||
|
||||
class PortfolioItemRequest(BaseModel):
    """Payload for adding a holding to the portfolio."""
    broker: str
    ticker: str
    name: str
    quantity: int
    avg_price: int  # average unit price (basis for valuation / P&L)
    purchase_price: Optional[int] = None  # purchase price (total-buy display); auto-set to avg_price when omitted
|
||||
|
||||
|
||||
class PortfolioUpdateRequest(BaseModel):
    """Partial-update payload; None fields are left untouched."""
    broker: Optional[str] = None
    ticker: Optional[str] = None
    name: Optional[str] = None
    quantity: Optional[int] = None
    avg_price: Optional[int] = None
    purchase_price: Optional[int] = None
|
||||
|
||||
|
||||
@app.get("/api/portfolio")
|
||||
def get_portfolio():
|
||||
"""전체 포트폴리오 조회 (현재가 + 손익 + 예수금 포함)"""
|
||||
items = get_all_portfolio()
|
||||
cash_rows = get_all_broker_cash()
|
||||
total_cash = sum(r["cash"] for r in cash_rows)
|
||||
|
||||
if not items:
|
||||
return {
|
||||
"holdings": [],
|
||||
"cash": cash_rows,
|
||||
"summary": {
|
||||
"total_buy": 0,
|
||||
"total_eval": 0,
|
||||
"total_profit": 0,
|
||||
"total_profit_rate": 0.0,
|
||||
"total_cash": total_cash,
|
||||
"total_assets": total_cash,
|
||||
},
|
||||
}
|
||||
|
||||
tickers = list({item["ticker"] for item in items})
|
||||
details = get_current_prices_detail(tickers)
|
||||
|
||||
holdings = []
|
||||
total_buy = 0 # 요약 표시용 (purchase_price 기반)
|
||||
total_cost_basis = 0 # 손익률 계산용 (avg_price 기반)
|
||||
total_eval = 0
|
||||
|
||||
for item in items:
|
||||
detail = details.get(item["ticker"])
|
||||
current_price = detail["price"] if detail else None
|
||||
price_session = detail["session"] if detail else None
|
||||
price_as_of = detail["as_of"] if detail else None
|
||||
# avg_price: 평균단가 — 손익(평가금액 - 매입원가) 계산 기준
|
||||
# purchase_price: 매입가 — 총 매입 금액 표시 기준 (없으면 avg_price로 폴백)
|
||||
purchase_price = item.get("purchase_price") if item.get("purchase_price") is not None else item["avg_price"]
|
||||
cost_basis = item["avg_price"] * item["quantity"]
|
||||
# 총 매입 금액 표시는 종목별 매입가의 단순 합계 (수량 미곱산)
|
||||
buy_amount = purchase_price
|
||||
eval_amount = current_price * item["quantity"] if current_price is not None else None
|
||||
profit_amount = (eval_amount - cost_basis) if eval_amount is not None else None
|
||||
profit_rate = round((profit_amount / cost_basis) * 100, 2) if (profit_amount is not None and cost_basis) else None
|
||||
|
||||
holdings.append({
|
||||
"id": item["id"],
|
||||
"broker": item["broker"],
|
||||
"ticker": item["ticker"],
|
||||
"name": item["name"],
|
||||
"quantity": item["quantity"],
|
||||
"avg_price": item["avg_price"],
|
||||
"purchase_price": purchase_price,
|
||||
"current_price": current_price,
|
||||
"price_session": price_session,
|
||||
"price_as_of": price_as_of,
|
||||
"eval_amount": eval_amount,
|
||||
"profit_amount": profit_amount,
|
||||
"profit_rate": profit_rate,
|
||||
})
|
||||
|
||||
total_buy += buy_amount
|
||||
total_cost_basis += cost_basis
|
||||
if eval_amount is not None:
|
||||
total_eval += eval_amount
|
||||
|
||||
# 손익은 실제 평균단가(cost_basis) 기준으로 계산
|
||||
total_profit = total_eval - total_cost_basis
|
||||
total_profit_rate = round((total_profit / total_cost_basis) * 100, 2) if total_cost_basis else 0.0
|
||||
|
||||
return {
|
||||
"holdings": holdings,
|
||||
"cash": cash_rows,
|
||||
"summary": {
|
||||
"total_buy": total_buy,
|
||||
"total_eval": total_eval,
|
||||
"total_profit": total_profit,
|
||||
"total_profit_rate": total_profit_rate,
|
||||
"total_cash": total_cash,
|
||||
"total_assets": total_eval + total_cash,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@app.post("/api/portfolio", status_code=201)
def create_portfolio_item(req: PortfolioItemRequest):
    """Add a holding to the portfolio; returns the id of the new row."""
    new_id = add_portfolio_item(
        req.broker,
        req.ticker,
        req.name,
        req.quantity,
        req.avg_price,
        purchase_price=req.purchase_price,
    )
    return {"id": new_id, "ok": True}
|
||||
|
||||
|
||||
# --- Broker Cash API ---
|
||||
# /{item_id} 라우트보다 반드시 먼저 정의해야 /cash가 item_id로 매칭되지 않음
|
||||
|
||||
class BrokerCashRequest(BaseModel):
    # Request body for the broker-cash upsert endpoint (PUT /api/portfolio/cash).
    broker: str  # brokerage name; used as the upsert key
    cash: int    # deposit balance (presumably KRW — confirm with frontend)
|
||||
|
||||
|
||||
@app.get("/api/portfolio/cash")
def list_broker_cash():
    """Return the deposit balance entry for every registered broker."""
    rows = get_all_broker_cash()
    return rows
|
||||
|
||||
|
||||
@app.put("/api/portfolio/cash")
def set_broker_cash(req: BrokerCashRequest):
    """Register or update (upsert) a broker's deposit balance."""
    broker, cash = req.broker, req.cash
    upsert_broker_cash(broker, cash)
    return {"ok": True, "broker": broker, "cash": cash}
|
||||
|
||||
|
||||
@app.delete("/api/portfolio/cash/{broker}")
def remove_broker_cash(broker: str):
    """Delete a broker's deposit entry; 404 when the broker is unknown."""
    deleted = delete_broker_cash(broker)
    if deleted:
        return {"ok": True}
    return JSONResponse(status_code=404, content={"error": "Broker not found"})
|
||||
|
||||
|
||||
@app.put("/api/portfolio/{item_id}")
def update_portfolio(item_id: int, req: PortfolioUpdateRequest):
    """Update a portfolio holding; 404 when the id does not exist."""
    existing = get_portfolio_item(item_id)
    if existing is None:
        return JSONResponse(status_code=404, content={"error": "Item not found"})
    update_portfolio_item(item_id, **req.model_dump())
    return {"ok": True}
|
||||
|
||||
|
||||
@app.delete("/api/portfolio/{item_id}")
def delete_portfolio(item_id: int):
    """Delete a portfolio holding; 404 when the id does not exist."""
    removed = delete_portfolio_item(item_id)
    if removed:
        return {"ok": True}
    return JSONResponse(status_code=404, content={"error": "Item not found"})
|
||||
|
||||
|
||||
# --- Asset Snapshot API ---
|
||||
|
||||
@app.post("/api/portfolio/snapshot")
def create_snapshot():
    """Manually save a total-asset snapshot for today's date.

    Evaluation uses live prices where available. Bug fix: get_current_prices()
    returns a dict whose values may be None on a failed fetch; the original
    `prices.get(ticker, avg_price)` only falls back for *missing* keys, so a
    present-but-None price crashed with `None * quantity`. We now fall back to
    avg_price whenever the fetched price is None.
    """
    today = date_type.today()
    today_str = today.strftime("%Y-%m-%d")

    items = get_all_portfolio()
    cash_rows = get_all_broker_cash()
    total_cash = sum(r["cash"] for r in cash_rows)

    total_eval = 0
    if items:
        tickers = list({item["ticker"] for item in items})
        prices = get_current_prices(tickers)
        for item in items:
            price = prices.get(item["ticker"])
            if price is None:
                # Fetch failed (or ticker missing) — value at purchase price.
                price = item["avg_price"]
            total_eval += price * item["quantity"]

    total_assets = total_eval + total_cash
    upsert_asset_snapshot(today_str, total_eval, total_cash, total_assets)

    return {
        "ok": True,
        "snapshot": {
            "date": today_str,
            "total_eval": total_eval,
            "total_cash": total_cash,
            "total_assets": total_assets,
        },
    }
|
||||
|
||||
|
||||
@app.get("/api/portfolio/snapshot/history")
def get_snapshot_history(days: int = Query(30, ge=0)):
    """Asset-snapshot history (days=0: everything, days=N: last N days)."""
    return {"snapshots": get_asset_snapshots(days)}
|
||||
|
||||
|
||||
# --- Sell History API ---
|
||||
|
||||
class SellHistoryRequest(BaseModel):
    # Request body for creating/updating a realized-sale record.
    # All derived figures (amounts, profit, rate) are supplied by the client
    # and stored as-is — the server does not recompute them.
    broker: str
    ticker: str
    name: str
    quantity: int            # shares sold
    avg_price: float         # average purchase price per share
    sell_price: float        # sale price per share
    commission: float = 0    # fees/taxes; defaults to 0
    buy_amount: float        # total purchase amount
    sell_amount: float       # total sale amount
    realized_profit: float   # realized profit as computed by the client
    realized_rate: float     # realized return (percent) as computed by the client
    sold_at: str             # sale date string
|
||||
|
||||
|
||||
@app.get("/api/portfolio/sell-history")
def list_sell_history(broker: Optional[str] = None, days: Optional[int] = None):
    """List sell records, optionally filtered by broker and/or recent days."""
    return {"records": get_sell_history(broker=broker, days=days)}
|
||||
|
||||
|
||||
@app.post("/api/portfolio/sell-history")
def create_sell_history(req: SellHistoryRequest):
    """Persist a new sell record and return the stored row."""
    payload = req.model_dump()
    return add_sell_history(payload)
|
||||
|
||||
|
||||
@app.put("/api/portfolio/sell-history/{record_id}")
def modify_sell_history(record_id: int, req: SellHistoryRequest):
    """Update a sell record; 404 when the id is unknown."""
    updated = update_sell_history(record_id, req.model_dump())
    if updated is None:
        return JSONResponse(status_code=404, content={"error": "not found"})
    return updated
|
||||
|
||||
|
||||
@app.delete("/api/portfolio/sell-history/{record_id}")
def remove_sell_history(record_id: int):
    """Delete a sell record; 404 when the id is unknown."""
    if delete_sell_history(record_id):
        return {"ok": True}
    return JSONResponse(status_code=404, content={"error": "not found"})
|
||||
|
||||
|
||||
|
||||
|
||||
139
stock/app/price_fetcher.py
Normal file
139
stock/app/price_fetcher.py
Normal file
@@ -0,0 +1,139 @@
|
||||
import time
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from typing import Optional
|
||||
|
||||
# 캐시는 detail 단위(가격+세션+as_of)로 보관. 호환용 단순 가격은 여기서 추출.
|
||||
_cache: dict[str, tuple[Optional[dict], float]] = {} # ticker -> (detail | None, timestamp)
|
||||
_CACHE_TTL = 180 # 3분
|
||||
|
||||
_HEADERS = {
|
||||
"User-Agent": (
|
||||
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
|
||||
"AppleWebKit/537.36 (KHTML, like Gecko) "
|
||||
"Chrome/90.0.4430.93 Safari/537.36"
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
def _parse_price_str(value) -> Optional[int]:
|
||||
if value is None:
|
||||
return None
|
||||
s = str(value).replace(",", "").strip()
|
||||
if not s:
|
||||
return None
|
||||
# 음수/소수점도 일단 정수 라운드(국내 주식은 정수)
|
||||
try:
|
||||
return int(float(s))
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def _select_price_from_response(payload: dict) -> dict:
    """Pick (price, session, as_of) from a Naver mobile stock API payload.

    Session classification:
    - "REGULAR"  : KRX regular session open — closePrice is live
    - "NXT_PRE"  : regular closed, NXT pre-market trading  → overPrice
    - "NXT_AFTER": regular closed, NXT after-market trading → overPrice
    - "CLOSED"   : regular closed, NXT not trading          → closePrice

    Returns {"price": int | None, "session": str, "as_of": str | None}.
    """
    regular_price = _parse_price_str(payload.get("closePrice") or payload.get("stockEndPrice"))
    regular_as_of = payload.get("localTradedAt")

    if (payload.get("marketStatus") or "").upper() == "OPEN":
        return {"price": regular_price, "session": "REGULAR", "as_of": regular_as_of}

    over = payload.get("overMarketPriceInfo")
    if isinstance(over, dict):
        nxt_open = (over.get("overMarketStatus") or "").upper() == "OPEN"
        nxt_trading = ((over.get("tradeStopType") or {}).get("name") or "").upper() == "TRADING"
        if nxt_open and nxt_trading:
            over_price = _parse_price_str(over.get("overPrice"))
            if over_price is not None:
                # Unknown NXT session types are conservatively treated as AFTER.
                session = {
                    "PRE_MARKET": "NXT_PRE",
                    "AFTER_MARKET": "NXT_AFTER",
                }.get((over.get("tradingSessionType") or "").upper(), "NXT_AFTER")
                return {
                    "price": over_price,
                    "session": session,
                    "as_of": over.get("localTradedAt") or regular_as_of,
                }

    return {"price": regular_price, "session": "CLOSED", "as_of": regular_as_of}
|
||||
|
||||
|
||||
def _fetch_mobile_api_payload(ticker: str) -> Optional[dict]:
    """Fetch the Naver mobile stock API payload dict; None on any failure."""
    url = f"https://m.stock.naver.com/api/stock/{ticker}/basic"
    try:
        resp = requests.get(url, headers=_HEADERS, timeout=5)
        resp.raise_for_status()
        payload = resp.json()
    except Exception:
        # Best effort: network/HTTP/JSON failures all degrade to None.
        return None
    return payload
|
||||
|
||||
|
||||
def _fetch_close_price_from_html(ticker: str) -> Optional[int]:
    """Naver Finance HTML fallback (regular-session close only; no NXT info).

    Returns the parsed close price as an int, or None on any failure.
    """
    url = f"https://finance.naver.com/item/main.naver?code={ticker}"
    try:
        resp = requests.get(url, headers=_HEADERS, timeout=5)
        resp.raise_for_status()
        # Naver Finance desktop pages are EUC-KR encoded
        soup = BeautifulSoup(resp.content, "html.parser", from_encoding="cp949")
        tag = soup.select_one(".no_today .blind")
        if tag:
            return _parse_price_str(tag.get_text(strip=True))
        return None
    except Exception:
        # Best-effort fallback: any network/parse error yields None
        return None
|
||||
|
||||
|
||||
def get_current_price_info(ticker: str) -> Optional[dict]:
    """Fetch detailed price info for one ticker with a 3-minute cache.

    Returns {"price": int | None, "session": str, "as_of": str | None} or None.
    A None result (both sources failed) is cached too, bounding retry rate.
    """
    now = time.time()
    entry = _cache.get(ticker)
    if entry is not None:
        cached_detail, fetched_at = entry
        if now - fetched_at < _CACHE_TTL:
            return cached_detail

    detail: Optional[dict] = None
    payload = _fetch_mobile_api_payload(ticker)
    if isinstance(payload, dict):
        candidate = _select_price_from_response(payload)
        if candidate.get("price") is not None:
            detail = candidate

    if detail is None:
        # Mobile API failed or had no price — try the HTML close-price fallback.
        html_price = _fetch_close_price_from_html(ticker)
        if html_price is not None:
            detail = {"price": html_price, "session": "CLOSED", "as_of": None}

    _cache[ticker] = (detail, now)
    return detail
|
||||
|
||||
|
||||
def get_current_prices_detail(tickers: list[str]) -> dict[str, Optional[dict]]:
    """Batch detail lookup; only cache misses hit the network."""
    out: dict[str, Optional[dict]] = {}
    for t in tickers:
        out[t] = get_current_price_info(t)
    return out
|
||||
|
||||
|
||||
def get_current_price(ticker: str) -> Optional[int]:
    """Single current price — compat shim; extracts price from the detail."""
    info = get_current_price_info(ticker)
    if not info:
        return None
    return info["price"]
|
||||
|
||||
|
||||
def get_current_prices(tickers: list[str]) -> dict[str, Optional[int]]:
    """Batch current-price lookup — compat shim over get_current_price()."""
    result: dict[str, Optional[int]] = {}
    for t in tickers:
        result[t] = get_current_price(t)
    return result
|
||||
231
stock/app/scraper.py
Normal file
231
stock/app/scraper.py
Normal file
@@ -0,0 +1,231 @@
|
||||
import logging
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from typing import List, Dict, Any
|
||||
import time
|
||||
|
||||
logger = logging.getLogger("stock.scraper")
|
||||
|
||||
# 네이버 파이낸스 주요 뉴스
|
||||
NAVER_FINANCE_NEWS_URL = "https://finance.naver.com/news/mainnews.naver"
|
||||
# 해외증시 뉴스 (모바일 API 사용)
|
||||
# NAVER_FINANCE_WORLD_NEWS_URL 사용 안함.
|
||||
|
||||
# 해외증시 메인 (지수용)
|
||||
NAVER_FINANCE_WORLD_URL = "https://finance.naver.com/world/"
|
||||
|
||||
def fetch_market_news() -> List[Dict[str, str]]:
    """
    Scrape Naver Finance 'main news' headlines.

    Returns: [{"title": "...", "link": "...", "summary": "...", "date": "..."}, ...]
    — each item also carries "press", "crawled_at", and "category": "domestic".
    Any failure is logged and yields an empty list.
    """
    try:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36"
        }
        resp = requests.get(NAVER_FINANCE_NEWS_URL, headers=headers, timeout=10)
        resp.raise_for_status()

        # Naver Finance desktop pages are EUC-KR (cp949) encoded
        soup = BeautifulSoup(resp.content, "html.parser", from_encoding="cp949")

        # Extract the main-news list
        # Structure: div.mainNewsList > ul > li
        articles = []
        news_list = soup.select(".mainNewsList ul li")

        for li in news_list:
            # A thumbnail may or may not be present
            dl = li.select_one("dl")
            if not dl:
                continue

            # Title (dd.articleSubject > a)
            subject_tag = dl.select_one(".articleSubject a")
            if not subject_tag:
                continue

            title = subject_tag.get_text(strip=True)
            link = "https://finance.naver.com" + subject_tag["href"]

            # Summary (dd.articleSummary)
            summary_tag = dl.select_one(".articleSummary")
            summary = ""
            press = ""
            date = ""

            if summary_tag:
                # Pull out press/date, then strip those tags so they don't
                # leak into the summary text
                for child in summary_tag.select(".press, .wdate"):
                    if "press" in child.get("class", []):
                        press = child.get_text(strip=True)
                    if "wdate" in child.get("class", []):
                        date = child.get_text(strip=True)
                    child.extract()
                summary = summary_tag.get_text(strip=True)

            articles.append({
                "title": title,
                "link": link,
                "summary": summary,
                "press": press,
                "date": date,
                "crawled_at": time.strftime("%Y-%m-%d %H:%M:%S"),
                "category": "domestic"
            })

        return articles

    except Exception as e:
        logger.error(f"국내 뉴스 스크래핑 실패: {e}")
        return []
|
||||
|
||||
def fetch_major_indices() -> Dict[str, Any]:
    """
    Scrape major market indicators: KOSPI / KOSDAQ / KOSPI200 (Naver Finance
    home), overseas indices (Dow / NASDAQ / S&P500 from the world page), and
    the USD/KRW exchange rate.

    Returns {"indices": [...], "crawled_at": "..."}; each overseas/FX failure
    is logged and skipped, and a total failure returns
    {"indices": [], "error": "..."}.
    """
    url = "https://finance.naver.com/"  # NOTE(review): unused — requests below pass literal URLs
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
    }
    try:
        targets = [
            {"key": "KOSPI", "selector": ".kospi_area", "url": "https://finance.naver.com/"},
            {"key": "KOSDAQ", "selector": ".kosdaq_area", "url": "https://finance.naver.com/"},
            {"key": "KOSPI200", "selector": ".kospi200_area", "url": "https://finance.naver.com/"},
        ]

        # Overseas indices are not on the Naver main page, so they need an
        # extra request to https://finance.naver.com/world/ — done below so
        # that everything is collected in a single call.

        indices = []

        # --- Domestic ---
        resp_kr = requests.get("https://finance.naver.com/", headers=headers, timeout=5)
        # Naver Finance desktop pages are EUC-KR encoded
        soup_kr = BeautifulSoup(resp_kr.content, "html.parser", from_encoding="cp949")

        for t in targets:
            area = soup_kr.select_one(t["selector"])
            if not area: continue

            # (existing parsing logic)
            num_tag = area.select_one(".num")
            value = num_tag.get_text(strip=True) if num_tag else ""

            change_val_tag = area.select_one(".num2")
            change_pct_tag = area.select_one(".num3")
            change_val = change_val_tag.get_text(strip=True) if change_val_tag else ""
            change_pct = change_pct_tag.get_text(strip=True) if change_pct_tag else ""

            # Direction markers: .bu_p = up (red), .bu_m = down (blue)
            direction = ""
            if area.select_one(".bu_p"): direction = "red"
            elif area.select_one(".bu_m"): direction = "blue"

            indices.append({
                "name": t["key"],
                "value": value,
                "change_value": change_val,
                "change_percent": change_pct,
                "direction": direction,
                "type": "domestic"
            })

        # --- Overseas (DJI, NAS, SPI) ---
        try:
            resp_world = requests.get(NAVER_FINANCE_WORLD_URL, headers=headers, timeout=5)
            soup_world = BeautifulSoup(resp_world.content, "html.parser", from_encoding="cp949")

            world_targets = [
                {"key": "DJI", "name": "다우산업", "sym": "DJI@DJI"},
                {"key": "NAS", "name": "나스닥", "sym": "NAS@IXIC"},
                {"key": "SPI", "name": "S&P500", "sym": "SPI@SPX"},
            ]

            for wt in world_targets:
                # Locate by symbol link (the most reliable anchor)
                a_tag = soup_world.select_one(f"a[href*='symbol={wt['sym']}']")
                if not a_tag:
                    continue

                # Walk up to the enclosing dl tag
                dl = a_tag.find_parent("dl")
                if not dl:
                    continue

                # Parse the values (dd.point_status)
                status_dd = dl.select_one("dd.point_status")
                if not status_dd:
                    continue

                # 1. Current value (strong)
                val_tag = status_dd.select_one("strong")
                value = val_tag.get_text(strip=True) if val_tag else ""

                # 2. Change amount (em)
                change_val_tag = status_dd.select_one("em")
                change_val = change_val_tag.get_text(strip=True) if change_val_tag else ""

                # 3. Change percent (span)
                change_pct_tag = status_dd.select_one("span")
                change_pct = change_pct_tag.get_text(strip=True) if change_pct_tag else ""

                # 4. Direction (from the dl element's classes)
                direction = ""
                dl_classes = dl.get("class", [])
                if "point_up" in dl_classes:
                    direction = "red"
                elif "point_dn" in dl_classes:
                    direction = "blue"

                indices.append({
                    "name": wt["name"],  # use the Korean display name
                    "value": value,
                    "change_value": change_val,
                    "change_percent": change_pct,
                    "direction": direction,
                    "type": "overseas"
                })

        except Exception as e:
            logger.error(f"해외 지수 스크래핑 실패: {e}")

        # --- Exchange rate (USD/KRW) ---
        try:
            resp_ex = requests.get("https://finance.naver.com/marketindex/", headers=headers, timeout=5)
            soup_ex = BeautifulSoup(resp_ex.content, "html.parser", from_encoding="cp949")

            usd_item = soup_ex.select_one("#exchangeList li.on > a.head.usd")
            if usd_item:
                value = usd_item.select_one(".value").get_text(strip=True)
                change_val = usd_item.select_one(".change").get_text(strip=True)

                # Direction comes from the hidden-label text ("상승"/"하락")
                direction = ""
                blind_txt = usd_item.select_one(".blind").get_text(strip=True)
                if "상승" in blind_txt: direction = "red"
                elif "하락" in blind_txt: direction = "blue"

                # Naver's HTML gives the change without a sign — attach one from
                # the direction (the frontend getDirection() keys off the sign
                # to pick color/arrow).
                if change_val and not change_val.startswith(("+", "-")):
                    if direction == "red":
                        change_val = f"+{change_val}"
                    elif direction == "blue":
                        change_val = f"-{change_val}"

                indices.append({
                    "name": "원달러 환율",
                    "value": value,
                    "change_value": change_val,
                    "change_percent": "",  # not directly visible in the main list
                    "direction": direction,
                    "type": "exchange"
                })
        except Exception as e:
            logger.error(f"환율 스크래핑 실패: {e}")

        return {"indices": indices, "crawled_at": time.strftime("%Y-%m-%d %H:%M:%S")}

    except Exception as e:
        logger.error(f"지수 스크래핑 전체 실패: {e}")
        return {"indices": [], "error": str(e)}
|
||||
12
stock/app/screener/__init__.py
Normal file
12
stock/app/screener/__init__.py
Normal file
@@ -0,0 +1,12 @@
|
||||
"""Stock screener — KRX 강세주 분석 노드 기반 보드.
|
||||
|
||||
See docs/superpowers/specs/2026-05-12-stock-screener-board-design.md
|
||||
"""
|
||||
|
||||
from .engine import Screener, ScreenContext, ScreenerResult
|
||||
from .registry import NODE_REGISTRY, GATE_REGISTRY
|
||||
|
||||
__all__ = [
|
||||
"Screener", "ScreenContext", "ScreenerResult",
|
||||
"NODE_REGISTRY", "GATE_REGISTRY",
|
||||
]
|
||||
76
stock/app/screener/_test_fixtures.py
Normal file
76
stock/app/screener/_test_fixtures.py
Normal file
@@ -0,0 +1,76 @@
|
||||
"""Synthetic fixtures for screener tests — no DB / no FDR / no naver."""
|
||||
|
||||
import datetime as dt
|
||||
import pandas as pd
|
||||
|
||||
|
||||
def make_master(tickers: list[str], market_caps: dict | None = None,
|
||||
preferred: set | None = None, managed: set | None = None) -> pd.DataFrame:
|
||||
market_caps = market_caps or {t: 100_000_000_000 for t in tickers}
|
||||
preferred = preferred or set()
|
||||
managed = managed or set()
|
||||
return pd.DataFrame([
|
||||
{
|
||||
"ticker": t,
|
||||
"name": f"테스트{t}",
|
||||
"market": "KOSPI",
|
||||
"market_cap": market_caps.get(t),
|
||||
"is_managed": int(t in managed),
|
||||
"is_preferred": int(t in preferred),
|
||||
"is_spac": 0,
|
||||
"listed_date": None,
|
||||
}
|
||||
for t in tickers
|
||||
]).set_index("ticker")
|
||||
|
||||
|
||||
def make_prices(tickers: list[str], days: int = 260, start_close: int = 50000,
                trend_pct: float = 0.0,
                asof: dt.date = dt.date(2026, 5, 12)) -> pd.DataFrame:
    """Synthetic OHLCV frame. trend_pct: daily close change in percent
    (positive → uptrend); the last generated row lands on `asof`."""
    rows = []
    for ticker in tickers:
        close = start_close
        # day_idx counts down so the series ends exactly on asof.
        for day_idx in range(days - 1, -1, -1):
            day = asof - dt.timedelta(days=day_idx)
            rows.append({
                "ticker": ticker,
                "date": day.isoformat(),
                "open": close,
                "high": int(close * 1.012),
                "low": int(close * 0.988),
                "close": close,
                "volume": 1_000_000,
                "value": close * 1_000_000,
            })
            close = int(close * (1 + trend_pct / 100))
    return pd.DataFrame(rows)
|
||||
|
||||
|
||||
def make_flow(tickers: list[str], days: int = 260,
|
||||
foreign_per_day: dict | None = None,
|
||||
asof: dt.date = dt.date(2026, 5, 12)) -> pd.DataFrame:
|
||||
foreign_per_day = foreign_per_day or {t: 0 for t in tickers}
|
||||
rows = []
|
||||
for t in tickers:
|
||||
for i in range(days):
|
||||
day_idx = days - 1 - i
|
||||
date = asof - dt.timedelta(days=day_idx)
|
||||
rows.append({
|
||||
"ticker": t, "date": date.isoformat(),
|
||||
"foreign_net": foreign_per_day.get(t, 0),
|
||||
"institution_net": 0,
|
||||
})
|
||||
return pd.DataFrame(rows)
|
||||
|
||||
|
||||
def make_kospi(days: int = 260, start: int = 2500, trend_pct: float = 0.0,
               asof: dt.date = dt.date(2026, 5, 12)) -> pd.Series:
    """Synthetic KOSPI level series indexed by ISO date, ending at `asof`."""
    level = start
    dates = []
    values = []
    for day_idx in range(days - 1, -1, -1):
        dates.append((asof - dt.timedelta(days=day_idx)).isoformat())
        values.append(level)
        level = level * (1 + trend_pct / 100)
    return pd.Series(values, index=dates, name="kospi")
|
||||
0
stock/app/screener/ai_news/__init__.py
Normal file
0
stock/app/screener/ai_news/__init__.py
Normal file
103
stock/app/screener/ai_news/analyzer.py
Normal file
103
stock/app/screener/ai_news/analyzer.py
Normal file
@@ -0,0 +1,103 @@
|
||||
"""Claude Haiku 기반 종목 뉴스 호재/악재 분석."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from typing import Any, Dict, List
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_MODEL = os.getenv("AI_NEWS_MODEL", "claude-haiku-4-5-20251001")
|
||||
|
||||
PROMPT_TEMPLATE = """다음은 종목 {name}({ticker})에 대한 최근 뉴스 {n}개의 헤드라인입니다.
|
||||
|
||||
{news_block}
|
||||
|
||||
이 뉴스들이 종목에 호재인지 악재인지 평가하세요.
|
||||
score: -10(매우 강한 악재) ~ +10(매우 강한 호재) 사이의 실수. 0은 중립.
|
||||
reason: 30자 이내 한 줄 근거.
|
||||
|
||||
JSON으로만 응답하세요. 다른 텍스트 금지:
|
||||
{{"score": <float>, "reason": "<string>"}}"""
|
||||
|
||||
|
||||
def _clamp(x: float, lo: float = -10.0, hi: float = 10.0) -> float:
|
||||
return max(lo, min(hi, x))
|
||||
|
||||
|
||||
def _format_news_block(news: List[Dict[str, Any]]) -> str:
|
||||
"""news dict 리스트 → prompt 에 들어가는 텍스트 블록.
|
||||
|
||||
summary 가 있으면 title 다음 줄에 indent 해서 포함 (최대 200자).
|
||||
pub_date 가 있으면 title 앞에 표시.
|
||||
"""
|
||||
lines: List[str] = []
|
||||
for n in news:
|
||||
date = (n.get("pub_date") or "").strip()
|
||||
title = (n.get("title") or "").strip()
|
||||
summary = (n.get("summary") or "").strip()
|
||||
prefix = f"[{date}] " if date else ""
|
||||
if summary:
|
||||
lines.append(f"- {prefix}{title}\n {summary[:200]}")
|
||||
else:
|
||||
lines.append(f"- {prefix}{title}")
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
async def score_sentiment(
    llm,
    ticker: str,
    news: List[Dict[str, Any]],
    *,
    name: str | None = None,
    model: str = DEFAULT_MODEL,
) -> Dict[str, Any]:
    """Score the sentiment of a ticker's news via the Anthropic Messages API.

    Returns {ticker, score_raw, reason, news_count, tokens_input,
    tokens_output, model}. A JSON parse failure degrades to a neutral
    score (0.0) with the error in `reason` instead of raising.
    `llm` is assumed to be an Anthropic async client — confirm with callers.
    """
    news_block = _format_news_block(news)
    prompt = PROMPT_TEMPLATE.format(
        name=name or ticker, ticker=ticker,
        n=len(news), news_block=news_block,
    )
    resp = await llm.messages.create(
        model=model,
        max_tokens=200,
        temperature=0,
        system="너는 한국 주식 뉴스 감성 분석가다. JSON 객체 하나만 반환한다.",
        messages=[
            {"role": "user", "content": prompt},
            # Assistant prefill — forces the first token to '{' to guarantee a JSON reply
            {"role": "assistant", "content": "{"},
        ],
    )
    raw = resp.content[0].text if resp.content else ""
    # The prefilled '{' is not echoed back in the response, so re-attach it
    text = "{" + raw if not raw.lstrip().startswith("{") else raw
    in_tokens = int(getattr(resp.usage, "input_tokens", 0) or 0)
    out_tokens = int(getattr(resp.usage, "output_tokens", 0) or 0)

    try:
        data = json.loads(text)
        score = _clamp(float(data["score"]))  # keep score inside [-10, +10]
        reason = str(data["reason"])[:200]
        return {
            "ticker": ticker,
            "score_raw": score,
            "reason": reason,
            "news_count": len(news),
            "tokens_input": in_tokens,
            "tokens_output": out_tokens,
            "model": model,
        }
    except (json.JSONDecodeError, KeyError, TypeError, ValueError) as e:
        log.warning("ai_news parse fail for %s: %s (raw=%r)", ticker, e, text[:100])
        return {
            "ticker": ticker,
            "score_raw": 0.0,
            "reason": f"parse fail: {e!s}"[:200],
            "news_count": len(news),
            "tokens_input": in_tokens,
            "tokens_output": out_tokens,
            "model": model,
        }
|
||||
70
stock/app/screener/ai_news/articles_source.py
Normal file
70
stock/app/screener/ai_news/articles_source.py
Normal file
@@ -0,0 +1,70 @@
|
||||
"""기존 articles 테이블에서 종목별 뉴스 매핑."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import datetime as dt
|
||||
import logging
|
||||
import sqlite3
|
||||
from typing import Any, Dict, List, Tuple
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def gather_articles_for_tickers(
    conn: sqlite3.Connection,
    tickers: List[str],
    asof: dt.date,
    *,
    window_days: int = 1,
    max_per_ticker: int = 5,
) -> Tuple[Dict[str, List[Dict[str, Any]]], Dict[str, int]]:
    """Map recent `articles` rows to tickers by company-name substring match.

    Returns:
        (
            {ticker: [{"title": str, "summary": str, "press": str, "pub_date": str}, ...]},
            {"total_articles": int, "matched_pairs": int, "hit_tickers": int},
        )
    """
    result: Dict[str, List[Dict[str, Any]]] = {t: [] for t in tickers}
    stats = {"total_articles": 0, "matched_pairs": 0, "hit_tickers": 0}

    if not tickers:
        return result, stats

    cutoff = (asof - dt.timedelta(days=window_days)).isoformat()

    qmarks = ",".join("?" * len(tickers))
    # Company names shorter than 2 chars are too false-positive-prone to match.
    name_map = {
        ticker: name
        for ticker, name in conn.execute(
            f"SELECT ticker, name FROM krx_master WHERE ticker IN ({qmarks})",
            tickers,
        )
        if name and len(name) >= 2
    }

    recent = conn.execute(
        "SELECT title, summary, press, pub_date, crawled_at "
        "FROM articles WHERE crawled_at >= ? ORDER BY crawled_at DESC",
        (cutoff,),
    ).fetchall()
    stats["total_articles"] = len(recent)

    for article in recent:
        title = (article[0] or "").strip()
        summary = (article[1] or "").strip()
        text = title + " " + summary
        for ticker, name in name_map.items():
            if name not in text or len(result[ticker]) >= max_per_ticker:
                continue
            result[ticker].append({
                "title": title,
                "summary": summary,
                "press": article[2] or "",
                "pub_date": article[3] or "",
            })
            stats["matched_pairs"] += 1

    stats["hit_tickers"] = sum(1 for arts in result.values() if arts)
    return result, stats
|
||||
141
stock/app/screener/ai_news/pipeline.py
Normal file
141
stock/app/screener/ai_news/pipeline.py
Normal file
@@ -0,0 +1,141 @@
|
||||
"""ai_news refresh pipeline — 시총 상위 N종목 병렬 처리."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import datetime as dt
|
||||
import logging
|
||||
import os
|
||||
import sqlite3
|
||||
import time
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from . import scraper as _scraper # legacy, kept for backward import
|
||||
from . import analyzer as _analyzer
|
||||
from . import articles_source # 신규
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_TOP_N = 100
|
||||
DEFAULT_CONCURRENCY = 10
|
||||
DEFAULT_NEWS_PER_TICKER = 5
|
||||
|
||||
|
||||
def _top_market_cap_tickers(conn: sqlite3.Connection, n: int) -> List[str]:
|
||||
rows = conn.execute(
|
||||
"SELECT ticker FROM krx_master "
|
||||
"WHERE market_cap IS NOT NULL AND is_preferred=0 AND is_spac=0 "
|
||||
"ORDER BY market_cap DESC LIMIT ?",
|
||||
(n,),
|
||||
).fetchall()
|
||||
return [r[0] for r in rows]
|
||||
|
||||
|
||||
def _make_llm():
    """Build an Anthropic AsyncClient.

    Requires ANTHROPIC_API_KEY in the environment — raises KeyError when it
    is missing. The import is function-local so the module loads without the
    anthropic package installed.
    """
    from anthropic import AsyncAnthropic
    return AsyncAnthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
|
||||
|
||||
|
||||
async def _process_one(
    ticker: str, name: str, articles: List[Dict[str, Any]],
    sem: asyncio.Semaphore, llm, model: str,
) -> Dict[str, Any]:
    """Score one ticker's articles under the shared concurrency semaphore.

    The semaphore bounds how many LLM calls run at once across a refresh run;
    the result dict is whatever analyzer.score_sentiment returns.
    """
    async with sem:
        return await _analyzer.score_sentiment(
            llm, ticker, articles, name=name, model=model,
        )
|
||||
|
||||
|
||||
def _upsert_news_sentiment(
|
||||
conn: sqlite3.Connection, asof: dt.date,
|
||||
rows: List[Dict[str, Any]], *, source: str = "articles",
|
||||
) -> None:
|
||||
iso = asof.isoformat()
|
||||
data = [
|
||||
(
|
||||
r["ticker"], iso, r["score_raw"], r["reason"], r["news_count"],
|
||||
r["tokens_input"], r["tokens_output"], r["model"], source,
|
||||
)
|
||||
for r in rows
|
||||
]
|
||||
conn.executemany(
|
||||
"""INSERT INTO news_sentiment
|
||||
(ticker, date, score_raw, reason, news_count,
|
||||
tokens_input, tokens_output, model, source)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
ON CONFLICT(ticker, date) DO UPDATE SET
|
||||
score_raw=excluded.score_raw,
|
||||
reason=excluded.reason,
|
||||
news_count=excluded.news_count,
|
||||
tokens_input=excluded.tokens_input,
|
||||
tokens_output=excluded.tokens_output,
|
||||
model=excluded.model,
|
||||
source=excluded.source
|
||||
""",
|
||||
data,
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
|
||||
async def refresh_daily(
    conn: sqlite3.Connection,
    asof: dt.date,
    *,
    top_n: int = DEFAULT_TOP_N,
    concurrency: int = DEFAULT_CONCURRENCY,
    max_news_per_ticker: int = DEFAULT_NEWS_PER_TICKER,
    window_days: int = 1,
    model: str = _analyzer.DEFAULT_MODEL,
) -> Dict[str, Any]:
    """Run the daily ai_news refresh for the top-N market-cap tickers.

    Gathers recent articles per ticker, scores sentiment via the LLM with
    bounded concurrency, upserts results into news_sentiment, and returns a
    run summary (counts, failures, token usage, top ±5 scores, timing).
    """
    started = time.time()
    tickers = _top_market_cap_tickers(conn, n=top_n)
    # ticker -> display name, for prompt readability (falls back to the ticker)
    name_map = {
        r[0]: r[1] for r in conn.execute(
            f"SELECT ticker, name FROM krx_master WHERE ticker IN "
            f"({','.join('?' * len(tickers))})", tickers,
        ).fetchall()
    } if tickers else {}

    articles_by_ticker, mapping_stats = articles_source.gather_articles_for_tickers(
        conn, tickers, asof,
        window_days=window_days,
        max_per_ticker=max_news_per_ticker,
    )

    # Semaphore bounds concurrent LLM requests; the client is closed on exit.
    sem = asyncio.Semaphore(concurrency)
    async with _make_llm() as llm:
        tasks = []
        for t in tickers:
            arts = articles_by_ticker.get(t, [])
            if not arts:
                continue  # no mapped articles — no score generated for this ticker
            tasks.append(_process_one(t, name_map.get(t, t), arts, sem, llm, model))
        # return_exceptions=True: one failed ticker doesn't abort the batch
        raw_results = await asyncio.gather(*tasks, return_exceptions=True)

    successes: List[Dict[str, Any]] = []
    failures: List[str] = []
    for r in raw_results:
        if isinstance(r, BaseException):
            failures.append(repr(r))
        elif isinstance(r, dict):
            successes.append(r)

    if successes:
        _upsert_news_sentiment(conn, asof, successes, source="articles")

    # Top 5 most positive / most negative scores for the run report
    top_pos = sorted(successes, key=lambda r: -r["score_raw"])[:5]
    top_neg = sorted(successes, key=lambda r: r["score_raw"])[:5]

    return {
        "asof": asof.isoformat(),
        "updated": len(successes),
        "failures": failures,
        "duration_sec": round(time.time() - started, 2),
        "tokens_input": sum(r["tokens_input"] for r in successes),
        "tokens_output": sum(r["tokens_output"] for r in successes),
        "top_pos": top_pos,
        "top_neg": top_neg,
        "model": model,
        "mapping": mapping_stats,
    }
|
||||
46
stock/app/screener/ai_news/scraper.py
Normal file
46
stock/app/screener/ai_news/scraper.py
Normal file
@@ -0,0 +1,46 @@
|
||||
"""[DEPRECATED] 네이버 finance 종목 뉴스 스크래핑.
|
||||
|
||||
본 모듈은 ai_news Phase 1 (2026-05-14) 에서 더 이상 파이프라인에서 사용되지 않음.
|
||||
데이터 소스는 stock 의 articles 테이블 (ai_news/articles_source.py) 로 전환됨.
|
||||
|
||||
삭제 시점: Phase 2 (DART 도입) 결정 후. IC 검증 4주 누적 후 노드 활성화
|
||||
여부에 따라 본 모듈을 (a) 완전 삭제 또는 (b) ensemble fallback 으로 재활용.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
NAVER_NEWS_URL = "https://finance.naver.com/item/news_news.naver"
|
||||
NAVER_HEADERS = {
|
||||
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
|
||||
"Referer": "https://finance.naver.com/",
|
||||
}
|
||||
|
||||
|
||||
async def fetch_news(client, ticker: str, n: int = 5) -> List[Dict[str, Any]]:
    """Scrape the top *n* news headlines for *ticker* from Naver finance.

    Best-effort: any HTTP error, non-200 status, or malformed markup
    yields an empty list rather than an exception.
    """
    try:
        resp = await client.get(NAVER_NEWS_URL, params={"code": ticker, "page": 1})
    except Exception as e:
        log.warning("ai_news scrape http error for %s: %s", ticker, e)
        return []
    if resp.status_code != 200:
        return []

    parsed = BeautifulSoup(resp.text, "lxml")
    headlines: List[Dict[str, Any]] = []
    for tr in parsed.select("table.type5 tbody tr")[:n]:
        anchor = tr.select_one("td.title a")
        when = tr.select_one("td.date")
        # Skip spacer/ad rows that lack either cell.
        if not anchor or not when:
            continue
        headlines.append({
            "title": anchor.get_text(strip=True),
            "date": when.get_text(strip=True),
        })
    return headlines
|
||||
73
stock/app/screener/ai_news/telegram.py
Normal file
73
stock/app/screener/ai_news/telegram.py
Normal file
@@ -0,0 +1,73 @@
|
||||
"""ai_news Top 5/5 텔레그램 메시지 빌더 (MarkdownV2)."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict, List
|
||||
|
||||
|
||||
_MD_SPECIAL = r"_*[]()~`>#+-=|{}.!\\"
|
||||
|
||||
|
||||
def _escape(text: str) -> str:
|
||||
return "".join("\\" + c if c in _MD_SPECIAL else c for c in str(text))
|
||||
|
||||
|
||||
def _cost_won(tokens_input: int, tokens_output: int) -> int:
|
||||
"""Claude Haiku 가격 환산 (대략): in $1/M × ₩1300, out $5/M × ₩1300."""
|
||||
return int(tokens_input * 0.0013 + tokens_output * 0.0065)
|
||||
|
||||
|
||||
def _row_line(idx: int, r: Dict[str, Any]) -> str:
    """Render one ranked row: "N. name (ticker) (+X.X) — reason" in MarkdownV2."""
    # The formatted score also goes through _escape — '+', '-' and '.'
    # are all MarkdownV2-reserved characters.
    score_str = _escape(f"{r['score_raw']:+.1f}")
    ticker = r["ticker"]
    name = r.get("name") or ""
    if name:
        label = f"{_escape(name)} \\({_escape(ticker)}\\)"
    else:
        label = _escape(ticker)
    reason = _escape(r["reason"])
    return f"{idx}\\. {label} \\({score_str}\\) — {reason}"
|
||||
|
||||
|
||||
def build_message(
    *,
    asof: str,
    top_pos: List[Dict[str, Any]],
    top_neg: List[Dict[str, Any]],
    tokens_input: int,
    tokens_output: int,
    mapping: Dict[str, int] | None = None,
) -> str:
    """Assemble the MarkdownV2 Telegram digest: header, two Top-5 sections, footer."""

    def _section(rows: List[Dict[str, Any]]) -> List[str]:
        # One numbered line per row; a placeholder when the section is empty.
        if not rows:
            return [_escape("- (없음)")]
        return [_row_line(i, r) for i, r in enumerate(rows, 1)]

    lines: List[str] = [
        f"🌅 *AI 뉴스 분석* \\({_escape(asof)} 08:00\\)",
        "",
        "📈 *호재 Top 5*",
    ]
    lines.extend(_section(top_pos))
    lines.extend(["", "📉 *악재 Top 5*"])
    lines.extend(_section(top_neg))

    cost = _cost_won(tokens_input, tokens_output)
    mapping_part = ""
    if mapping:
        mapping_part = (
            f"매핑 {mapping['hit_tickers']}/100 ticker "
            f"\\({mapping['matched_pairs']}쌍 / articles {mapping['total_articles']}건\\) · "
        )
    footer = (
        f"_분석: 시총 상위 100종목 · {mapping_part}"
        f"토큰 {tokens_input:,} in / {tokens_output:,} out · "
        f"약 ₩{cost:,}_"
    )
    lines.extend(["", footer])
    return "\n".join(lines)
|
||||
125
stock/app/screener/ai_news/validation.py
Normal file
125
stock/app/screener/ai_news/validation.py
Normal file
@@ -0,0 +1,125 @@
|
||||
"""AI news sentiment validation — Spearman IC vs forward returns.
|
||||
|
||||
핵심 metric: 일자별 score_raw 와 다음 N일 forward return 의 Spearman 상관.
|
||||
4주+ 누적 후 IC mean > 0.05 면 weight 활성화 가치 있음.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import datetime as dt
|
||||
import sqlite3
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import pandas as pd
|
||||
|
||||
|
||||
def _spearman(a: pd.Series, b: pd.Series) -> Optional[float]:
|
||||
"""Spearman rank correlation. None if insufficient/degenerate data."""
|
||||
if len(a) < 5 or len(b) < 5:
|
||||
return None
|
||||
if a.std(ddof=0) == 0 or b.std(ddof=0) == 0:
|
||||
return None
|
||||
return float(a.rank().corr(b.rank()))
|
||||
|
||||
|
||||
def compute_ic(
    conn: sqlite3.Connection,
    *,
    days: int = 30,
    horizon: int = 1,
    min_news_count: int = 1,
    asof_today: Optional[dt.date] = None,
) -> Dict[str, Any]:
    """Compute daily Spearman IC of ai_news.score_raw vs forward return.

    Args:
        conn: SQLite connection exposing news_sentiment and krx_daily_prices.
        days: calendar-day lookback for both the sentiment and price queries.
        horizon: forward-return horizon, counted in price ROWS per ticker
            (close[t+horizon] / close[t] - 1), i.e. trading days — not calendar days.
        min_news_count: sentiment rows with fewer analyzed articles are excluded.
        asof_today: evaluation date; defaults to today.

    Returns:
        {
            "horizon_days": int,
            "min_news_count": int,
            "window_days": int,
            "ic_count": int,  # number of days with a valid IC
            "ic_mean": float | None,
            "ic_std": float | None,
            "ic_per_day": [{"date": "YYYY-MM-DD", "ic": float, "n": int}, ...],
            "verdict": "skip" | "weak" | "strong",
        }

    verdict:
        - skip: ic_count < 10
        - weak: ic_mean in [-0.05, 0.05]
        - strong: |ic_mean| > 0.05
    """
    asof_today = asof_today or dt.date.today()
    cutoff = (asof_today - dt.timedelta(days=days)).isoformat()

    sentiment = pd.read_sql_query(
        "SELECT ticker, date, score_raw, news_count "
        "FROM news_sentiment WHERE date >= ? AND news_count >= ? ORDER BY date",
        conn, params=(cutoff, min_news_count),
    )
    if sentiment.empty:
        return _empty_result(days, horizon, min_news_count)

    # Forward return per (ticker, date): close[date+horizon] / close[date] - 1.
    prices = pd.read_sql_query(
        "SELECT ticker, date, close FROM krx_daily_prices "
        "WHERE date >= ? ORDER BY ticker, date",
        conn, params=(cutoff,),
    )
    if prices.empty:
        return _empty_result(days, horizon, min_news_count)

    prices = prices.sort_values(["ticker", "date"])
    # shift(-horizon) within each ticker; the last `horizon` rows per ticker
    # get NaN forward returns and are dropped after the merge below.
    prices["fwd_close"] = prices.groupby("ticker", group_keys=False)["close"].shift(-horizon)
    prices["fwd_ret"] = prices["fwd_close"] / prices["close"] - 1.0

    # Inner join keeps only sentiment rows that have a price bar on the same date.
    merged = sentiment.merge(
        prices[["ticker", "date", "fwd_ret"]], on=["ticker", "date"], how="inner"
    )
    merged = merged.dropna(subset=["fwd_ret"])
    if merged.empty:
        return _empty_result(days, horizon, min_news_count)

    # One cross-sectional IC per day; _spearman returns None for days with
    # fewer than 5 names or degenerate (constant) inputs, which are skipped.
    ic_rows: List[Dict[str, Any]] = []
    for date, grp in merged.groupby("date"):
        ic = _spearman(grp["score_raw"], grp["fwd_ret"])
        if ic is not None:
            ic_rows.append({"date": date, "ic": ic, "n": int(len(grp))})

    if not ic_rows:
        return _empty_result(days, horizon, min_news_count)

    ic_series = pd.Series([r["ic"] for r in ic_rows], dtype=float)
    ic_mean = float(ic_series.mean())
    ic_std = float(ic_series.std(ddof=0)) if len(ic_series) > 1 else 0.0

    if len(ic_rows) < 10:
        verdict = "skip"
    elif abs(ic_mean) > 0.05:
        verdict = "strong"
    else:
        verdict = "weak"

    return {
        "horizon_days": horizon,
        "min_news_count": min_news_count,
        "window_days": days,
        "ic_count": len(ic_rows),
        "ic_mean": round(ic_mean, 4),
        "ic_std": round(ic_std, 4),
        "ic_per_day": ic_rows,
        "verdict": verdict,
    }
|
||||
|
||||
|
||||
def _empty_result(days: int, horizon: int, min_news_count: int) -> Dict[str, Any]:
|
||||
return {
|
||||
"horizon_days": horizon,
|
||||
"min_news_count": min_news_count,
|
||||
"window_days": days,
|
||||
"ic_count": 0,
|
||||
"ic_mean": None,
|
||||
"ic_std": None,
|
||||
"ic_per_day": [],
|
||||
"verdict": "skip",
|
||||
}
|
||||
167
stock/app/screener/engine.py
Normal file
167
stock/app/screener/engine.py
Normal file
@@ -0,0 +1,167 @@
|
||||
"""Screener engine — ScreenContext (Phase 0) + Screener/combine (Phase 2)."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import datetime as dt
|
||||
import sqlite3
|
||||
from dataclasses import dataclass, replace
|
||||
|
||||
import pandas as pd
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ScreenContext:
    """Read-only data container shared across one screener run."""
    master: pd.DataFrame        # index=ticker
    prices: pd.DataFrame        # cols: ticker,date,open,high,low,close,volume,value
    flow: pd.DataFrame          # cols: ticker,date,foreign_net,institution_net
    kospi: pd.Series            # index=date(str), name="kospi"
    asof: dt.date
    news_sentiment: "pd.DataFrame | None" = None

    @classmethod
    def load(cls, conn: sqlite3.Connection, asof: dt.date,
             lookback_days: int = 252 * 2) -> "ScreenContext":
        """Load all frames for one run from SQLite, bounded to [cutoff, asof].

        The 1.5x factor pads trading-day lookback into calendar days so that
        252*2 trading days of history survive weekends/holidays.
        """
        cutoff = (asof - dt.timedelta(days=int(lookback_days * 1.5))).isoformat()
        asof_iso = asof.isoformat()

        master = pd.read_sql_query(
            "SELECT * FROM krx_master",
            conn, index_col="ticker",
        )
        prices = pd.read_sql_query(
            "SELECT ticker,date,open,high,low,close,volume,value "
            "FROM krx_daily_prices WHERE date BETWEEN ? AND ? ORDER BY date",
            conn, params=(cutoff, asof_iso),
        )
        flow = pd.read_sql_query(
            "SELECT ticker,date,foreign_net,institution_net "
            "FROM krx_flow WHERE date BETWEEN ? AND ? ORDER BY date",
            conn, params=(cutoff, asof_iso),
        )
        # Sentiment is a single-day snapshot (asof only), unlike the windows above.
        news_sentiment = pd.read_sql_query(
            "SELECT ticker, score_raw, news_count FROM news_sentiment WHERE date = ?",
            conn, params=(asof_iso,),
        )

        # KOSPI index proxy: in the MVP, 005930 (Samsung Electronics) closes
        # stand in for the market; a dedicated ^KS11 cache comes in a later slice.
        kospi = pd.Series(dtype=float, name="kospi")
        if "005930" in master.index and not prices.empty:
            sub = prices[prices["ticker"] == "005930"].set_index("date")["close"]
            kospi = sub.copy()
            kospi.name = "kospi"

        return cls(master=master, prices=prices, flow=flow, kospi=kospi, asof=asof,
                   news_sentiment=news_sentiment)

    def restrict(self, tickers) -> "ScreenContext":
        """New context limited to *tickers* (kospi/news_sentiment left untouched)."""
        tickers = pd.Index(tickers)
        return replace(
            self,
            master=self.master.loc[self.master.index.intersection(tickers)],
            prices=self.prices[self.prices["ticker"].isin(tickers)],
            flow=self.flow[self.flow["ticker"].isin(tickers)],
        )

    def latest_close(self) -> pd.Series:
        """Last close per ticker (empty float Series when no prices loaded)."""
        if self.prices.empty:
            return pd.Series(dtype=float)
        return self.prices.sort_values("date").groupby("ticker")["close"].last()

    def latest_high(self) -> pd.Series:
        """Last intraday high per ticker (empty float Series when no prices loaded)."""
        if self.prices.empty:
            return pd.Series(dtype=float)
        return self.prices.sort_values("date").groupby("ticker")["high"].last()
|
||||
|
||||
|
||||
# ---- combine + Screener (Phase 2) ----
|
||||
|
||||
from . import position_sizer as _ps
|
||||
|
||||
|
||||
def combine(scores: dict, weights: dict) -> pd.Series:
    """Weighted average across score nodes. ValueError if all weights = 0."""
    active = {}
    for node, weight in weights.items():
        if weight > 0 and node in scores:
            active[node] = weight
    if not active:
        raise ValueError("no active score nodes (all weights = 0)")

    frame = pd.DataFrame({node: scores[node] for node in active})
    weight_vec = pd.Series(active)
    # Missing per-node scores count as 0 rather than dropping the ticker.
    total = frame.fillna(0).multiply(weight_vec, axis=1).sum(axis=1)
    return total / weight_vec.sum()
|
||||
|
||||
|
||||
@dataclass
class ScreenerResult:
    """Output bundle of one Screener.run() pass."""

    asof: dt.date            # evaluation date the run was scoped to
    survivors_count: int     # tickers remaining after the hygiene gate
    scores: dict             # node name → pd.Series
    weights: dict            # node name → weight used in combine()
    ranked: pd.Series        # ticker → total_score (sorted desc, head=top_n)
    rows: list               # list of dicts (for serialization)
    warnings: list           # human-readable issues collected during the run
|
||||
|
||||
|
||||
class Screener:
    """Orchestrates one screening pass: gate → score nodes → combine → rank → sizing."""

    def __init__(self, gate, score_nodes, weights: dict, node_params: dict,
                 gate_params: dict, top_n: int = 20, sizer_params: dict = None):
        self.gate = gate                  # GateNode instance (pre-filter)
        self.score_nodes = score_nodes    # iterable of ScoreNode instances
        self.weights = weights            # node name → weight (0 disables the node)
        self.node_params = node_params    # node name → params dict
        self.gate_params = gate_params
        self.top_n = top_n
        # Defaults applied when the caller passes None (position-sizing knobs).
        self.sizer_params = sizer_params or {"atr_window": 14, "atr_stop_mult": 2.0, "rr_ratio": 2.0}

    def run(self, ctx: ScreenContext) -> ScreenerResult:
        """Run the full pipeline on *ctx*.

        Raises ValueError when the gate eliminates every ticker, or (via
        combine) when no score node carries positive weight.
        """
        warnings: list = []

        survivors = self.gate.filter(ctx, self.gate_params)
        if len(survivors) == 0:
            raise ValueError("no survivors after hygiene gate")
        if len(survivors) < 100:
            warnings.append(f"survivors_count={len(survivors)} < 100 — 백분위 정규화 신뢰도 낮음")

        scoped = ctx.restrict(survivors)
        scores: dict = {}
        for n in self.score_nodes:
            w = self.weights.get(n.name, 0)
            if w <= 0:
                continue
            try:
                scores[n.name] = n.compute(scoped, self.node_params.get(n.name, {}))
            except Exception as e:
                # A failing node degrades to all-zero scores instead of
                # aborting the whole run; the warning surfaces the failure.
                warnings.append(f"node '{n.name}' failed: {e}")
                scores[n.name] = pd.Series(0.0, index=scoped.master.index)

        total = combine(scores, self.weights)
        ranked = total.sort_values(ascending=False).head(self.top_n)

        sizing = _ps.plan_positions(scoped, list(ranked.index), self.sizer_params)
        latest_close = scoped.latest_close()

        rows = []
        for rank_idx, ticker in enumerate(ranked.index, start=1):
            # Sizing may be missing for a ticker (no price rows) — .get defaults to {}.
            s = sizing.get(ticker, {})
            row = {
                "rank": rank_idx,
                "ticker": ticker,
                "name": str(scoped.master.loc[ticker, "name"]),
                "total_score": float(ranked.loc[ticker]),
                "scores": {k: float(v.get(ticker, 0.0)) for k, v in scores.items()},
                "close": int(latest_close.get(ticker, 0)),
                "market_cap": int(scoped.master.loc[ticker, "market_cap"] or 0),
                "entry_price": s.get("entry_price"),
                "stop_price": s.get("stop_price"),
                "target_price": s.get("target_price"),
                "atr14": s.get("atr14"),
                "r_pct": s.get("r_pct"),
            }
            rows.append(row)

        return ScreenerResult(
            asof=ctx.asof, survivors_count=len(survivors),
            scores=scores, weights=self.weights,
            ranked=ranked, rows=rows, warnings=warnings,
        )
|
||||
0
stock/app/screener/nodes/__init__.py
Normal file
0
stock/app/screener/nodes/__init__.py
Normal file
36
stock/app/screener/nodes/ai_news.py
Normal file
36
stock/app/screener/nodes/ai_news.py
Normal file
@@ -0,0 +1,36 @@
|
||||
"""AI 뉴스 호재/악재 점수 노드.
|
||||
|
||||
ScreenContext.news_sentiment (DataFrame: ticker, score_raw, news_count) 를
|
||||
min_news_count 로 필터한 뒤 percentile_rank 로 0~100 변환.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from .base import ScoreNode, percentile_rank
|
||||
|
||||
|
||||
class AiNewsSentiment(ScoreNode):
    """Percentile score (0-100) from per-ticker LLM news sentiment (score_raw)."""

    name = "ai_news"
    label = "AI 뉴스 호재/악재"
    default_params = {"min_news_count": 1}
    param_schema = {
        "type": "object",
        "properties": {
            "min_news_count": {
                "type": "integer", "minimum": 0, "default": 1,
                "description": "최소 분석 뉴스 수. 미만이면 점수 미산출.",
            },
        },
    }

    def compute(self, ctx, params: dict) -> pd.Series:
        sentiment = getattr(ctx, "news_sentiment", None)
        nothing = pd.Series(dtype=float)
        if sentiment is None or sentiment.empty:
            return nothing
        threshold = int(params.get("min_news_count", 1))
        eligible = sentiment[sentiment["news_count"] >= threshold]
        if eligible.empty:
            return nothing
        return percentile_rank(eligible.set_index("ticker")["score_raw"])
|
||||
40
stock/app/screener/nodes/base.py
Normal file
40
stock/app/screener/nodes/base.py
Normal file
@@ -0,0 +1,40 @@
|
||||
"""Node base classes + helpers."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Any, ClassVar
|
||||
|
||||
import pandas as pd
|
||||
|
||||
|
||||
class ScoreNode(ABC):
    """Abstract scoring node: maps a ScreenContext to a 0..100 score per ticker."""

    name: ClassVar[str]              # registry key / weights key
    label: ClassVar[str]             # human-readable display name
    default_params: ClassVar[dict]   # fallback params for compute()
    param_schema: ClassVar[dict]     # JSON-schema-style description of params

    @abstractmethod
    def compute(self, ctx: "Any", params: dict) -> pd.Series:
        """returns Series indexed by ticker, 0..100 float."""
|
||||
|
||||
|
||||
class GateNode(ABC):
    """Abstract pre-filter node: binary pass/fail, applied before scoring."""

    name: ClassVar[str]              # registry key
    label: ClassVar[str]             # human-readable display name
    default_params: ClassVar[dict]   # fallback params for filter()
    param_schema: ClassVar[dict]     # JSON-schema-style description of params

    @abstractmethod
    def filter(self, ctx: "Any", params: dict) -> pd.Index:
        """returns surviving tickers."""
|
||||
|
||||
|
||||
def percentile_rank(series: pd.Series) -> pd.Series:
    """Map values to percentile ranks in [0, 100].

    An all-equal (non-NaN) series maps to a flat 50.0; NaN entries are
    preserved; an empty series is returned with float dtype.
    """
    if series.empty:
        return series.astype(float)
    if series.dropna().nunique() == 1:
        return pd.Series(50.0, index=series.index)
    return series.rank(pct=True, na_option="keep") * 100.0
|
||||
33
stock/app/screener/nodes/foreign_buy.py
Normal file
33
stock/app/screener/nodes/foreign_buy.py
Normal file
@@ -0,0 +1,33 @@
|
||||
"""외국인 N일 누적 순매수 강도 (시총 대비)."""
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from .base import ScoreNode, percentile_rank
|
||||
|
||||
|
||||
class ForeignBuy(ScoreNode):
    """Foreign-investor net buying over the last N sessions, scaled by market cap."""

    name = "foreign_buy"
    label = "외국인 누적 순매수"
    default_params = {"window_days": 5}
    param_schema = {
        "type": "object",
        "properties": {
            "window_days": {"type": "integer", "minimum": 1, "maximum": 60, "default": 5}
        },
    }

    def compute(self, ctx, params: dict) -> pd.Series:
        window = int(params.get("window_days", 5))
        flow = ctx.flow
        if flow.empty:
            return pd.Series(dtype=float)

        # Last `window` flow rows per ticker (tail after a date sort).
        last_dates = (
            flow.sort_values("date").groupby("ticker").tail(window)
        )
        net_sum = last_dates.groupby("ticker")["foreign_net"].sum()

        # Normalize by market cap; zero caps become NA so they don't divide.
        market_cap = ctx.master["market_cap"].fillna(0).reindex(net_sum.index)
        raw = (net_sum / market_cap.replace(0, pd.NA)).astype(float)

        # Tickers without a usable ratio land at the neutral 50th percentile.
        return percentile_rank(raw).fillna(50.0)
|
||||
30
stock/app/screener/nodes/high52w.py
Normal file
30
stock/app/screener/nodes/high52w.py
Normal file
@@ -0,0 +1,30 @@
|
||||
"""52주 신고가 근접도 (룰 기반: 70% 미만 0점, 100% 도달 100점, 선형)."""
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from .base import ScoreNode
|
||||
|
||||
|
||||
class High52WProximity(ScoreNode):
    """Closeness of the last close to its N-day high: <70% → 0, at the high → 100, linear between."""

    name = "high52w"
    label = "52주 신고가 근접도"
    default_params = {"window_days": 252}
    param_schema = {
        "type": "object",
        "properties": {
            "window_days": {"type": "integer", "minimum": 60, "maximum": 504, "default": 252}
        },
    }

    def compute(self, ctx, params: dict) -> pd.Series:
        lookback = int(params.get("window_days", 252))
        if ctx.prices.empty:
            return pd.Series(dtype=float)

        recent = ctx.prices.sort_values("date").groupby("ticker").tail(lookback)
        per_ticker = recent.groupby("ticker").agg(
            close=("close", "last"), high=("high", "max")
        )
        ratio = (per_ticker["close"] / per_ticker["high"]).clip(upper=1.0)
        # Linear ramp: 0.70 of the high scores 0, 1.00 scores 100.
        scaled = ((ratio - 0.7) / 0.3).clip(lower=0.0, upper=1.0) * 100.0
        return scaled.fillna(0.0)
|
||||
81
stock/app/screener/nodes/hygiene.py
Normal file
81
stock/app/screener/nodes/hygiene.py
Normal file
@@ -0,0 +1,81 @@
|
||||
"""HygieneGate — pre-filter for screener."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from .base import GateNode
|
||||
|
||||
|
||||
class HygieneGate(GateNode):
    """Pre-filter removing illiquid, tiny, preferred/managed/SPAC, and halted names."""

    name = "hygiene"
    label = "위생 게이트"
    default_params = {
        "min_market_cap_won": 50_000_000_000,
        "min_avg_value_won": 500_000_000,
        "min_listed_days": 60,
        "skip_managed": True,
        "skip_preferred": True,
        "skip_spac": True,
        "skip_halted_days": 3,
    }
    param_schema = {
        "type": "object",
        "properties": {
            "min_market_cap_won": {"type": "integer", "minimum": 0},
            "min_avg_value_won": {"type": "integer", "minimum": 0},
            "min_listed_days": {"type": "integer", "minimum": 0},
            "skip_managed": {"type": "boolean"},
            "skip_preferred": {"type": "boolean"},
            "skip_spac": {"type": "boolean"},
            "skip_halted_days": {"type": "integer", "minimum": 0},
        },
    }

    def filter(self, ctx, params: dict) -> pd.Index:
        """Apply all hygiene checks and return the surviving ticker index.

        NOTE(review): `min_listed_days` is declared in params/schema but no
        listed-days check is implemented below (see the trailing comment) —
        confirm whether that is still intentional for the MVP.
        """
        master = ctx.master.copy()
        prices = ctx.prices

        # Market cap floor (NaN caps treated as 0, i.e. rejected).
        master = master[master["market_cap"].fillna(0) >= params["min_market_cap_won"]]

        # Preferred / managed / SPAC exclusions (flag columns, 0 = not flagged).
        if params.get("skip_preferred", True):
            master = master[master["is_preferred"] == 0]
        if params.get("skip_managed", True):
            master = master[master["is_managed"] == 0]
        if params.get("skip_spac", True):
            master = master[master["is_spac"] == 0]

        candidates = master.index

        # 20-session average traded value floor.
        if not prices.empty:
            recent20 = (
                prices[prices["ticker"].isin(candidates)]
                .sort_values("date")
                .groupby("ticker")
                .tail(20)
            )
            avg_value = recent20.groupby("ticker")["value"].mean()
            ok = avg_value[avg_value >= params["min_avg_value_won"]].index
            candidates = candidates.intersection(ok)

        # Trading-halt screen: drop tickers whose last N sessions ALL have volume==0.
        halted_days = params.get("skip_halted_days", 3)
        if halted_days > 0 and not prices.empty:
            recent = (
                prices[prices["ticker"].isin(candidates)]
                .sort_values("date")
                .groupby("ticker")
                .tail(halted_days)
            )
            zero_count = (
                recent.assign(z=lambda d: (d["volume"] == 0).astype(int))
                .groupby("ticker")["z"].sum()
            )
            healthy = zero_count[zero_count < halted_days].index
            candidates = candidates.intersection(healthy)

        # Listed-days check: in the MVP, listed_date may be null — null passes.
        return pd.Index(candidates)
|
||||
51
stock/app/screener/nodes/ma_alignment.py
Normal file
51
stock/app/screener/nodes/ma_alignment.py
Normal file
@@ -0,0 +1,51 @@
|
||||
"""이평선 정배열 점수 — 5개 조건 충족 개수 / 5 × 100."""
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from .base import ScoreNode
|
||||
|
||||
|
||||
class MaAlignment(ScoreNode):
    """Moving-average alignment score: (conditions met / 5) × 100 per ticker."""

    name = "ma_alignment"
    label = "이평선 정배열"
    default_params = {"ma_periods": [50, 150, 200]}
    param_schema = {
        "type": "object",
        "properties": {
            "ma_periods": {"type": "array", "items": {"type": "integer"}}
        },
    }

    def compute(self, ctx, params: dict) -> pd.Series:
        ma_periods = params.get("ma_periods", self.default_params["ma_periods"])
        if len(ma_periods) != 3:
            raise ValueError("ma_periods must have 3 entries (short, medium, long)")
        ma_s, ma_m, ma_l = (int(x) for x in ma_periods)

        prices = ctx.prices
        if prices.empty:
            return pd.Series(dtype=float)

        ordered = prices.sort_values("date")
        # Need at least 252 rows for the 52-week low, and at least ma_l for the long MA.
        min_history = max(252, ma_l)

        def _score(s: pd.Series) -> float:
            # Per-ticker close history; NaN (→ 0 via fillna below) when too short.
            closes = s.astype(float).reset_index(drop=True)
            if len(closes) < min_history:
                return float("nan")
            close = closes.iloc[-1]
            ma_short = closes.rolling(ma_s).mean().iloc[-1]
            ma_medium = closes.rolling(ma_m).mean().iloc[-1]
            ma_long = closes.rolling(ma_l).mean().iloc[-1]
            low52 = closes.iloc[-252:].min()
            conds = [
                close > ma_short,
                ma_short > ma_medium,
                ma_medium > ma_long,
                close > ma_long,
                close >= low52 * 1.25,  # at least 25% above the 52-week low
            ]
            return sum(conds) / 5 * 100.0

        raw = ordered.groupby("ticker", group_keys=False)["close"].apply(_score)
        return raw.fillna(0.0)
|
||||
34
stock/app/screener/nodes/momentum.py
Normal file
34
stock/app/screener/nodes/momentum.py
Normal file
@@ -0,0 +1,34 @@
|
||||
"""20일 모멘텀."""
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from .base import ScoreNode, percentile_rank
|
||||
|
||||
|
||||
class Momentum20(ScoreNode):
    """Trailing N-day price momentum, percentile-ranked to 0-100."""

    name = "momentum"
    label = "20일 모멘텀"
    default_params = {"window_days": 20}
    param_schema = {
        "type": "object",
        "properties": {
            "window_days": {"type": "integer", "minimum": 5, "maximum": 120, "default": 20}
        },
    }

    def compute(self, ctx, params: dict) -> pd.Series:
        lookback = int(params.get("window_days", 20))
        if ctx.prices.empty:
            return pd.Series(dtype=float)

        # window+1 rows are needed so the return spans exactly `lookback` sessions.
        needed = lookback + 1
        tail_rows = ctx.prices.sort_values("date").groupby("ticker").tail(needed)

        def _window_return(closes):
            if len(closes) < needed:
                return float("nan")
            return closes.iloc[-1] / closes.iloc[0] - 1

        momentum = tail_rows.groupby("ticker")["close"].apply(_window_return)
        # Short-history tickers land at the neutral 50th percentile.
        return percentile_rank(momentum).fillna(50.0)
|
||||
48
stock/app/screener/nodes/rs_rating.py
Normal file
48
stock/app/screener/nodes/rs_rating.py
Normal file
@@ -0,0 +1,48 @@
|
||||
"""RS Rating — IBD 가중 (3m=2,6m=1,9m=1,12m=1)."""
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from .base import ScoreNode, percentile_rank
|
||||
|
||||
|
||||
_PERIOD_TO_DAYS = {"3m": 63, "6m": 126, "9m": 189, "12m": 252}
|
||||
|
||||
|
||||
class RsRating(ScoreNode):
    """IBD-style relative strength vs the market proxy, weighted 3m=2/6m=1/9m=1/12m=1."""

    name = "rs_rating"
    label = "RS Rating (시장 대비 상대강도)"
    default_params = {"weights": {"3m": 2, "6m": 1, "9m": 1, "12m": 1}}
    param_schema = {
        "type": "object",
        "properties": {
            "weights": {"type": "object"}
        },
    }

    def compute(self, ctx, params: dict) -> pd.Series:
        weights: dict = params.get("weights", self.default_params["weights"])
        prices = ctx.prices
        kospi = ctx.kospi
        if prices.empty or kospi.empty:
            return pd.Series(dtype=float)

        ordered = prices.sort_values("date")

        def _excess_for_ticker(g: pd.DataFrame) -> float:
            # Weighted average of (stock return - market return) over each period.
            closes = g.set_index("date")["close"]
            total = 0.0
            wsum = 0.0
            for period, w in weights.items():
                # NOTE(review): unknown period keys map to k=0 (a zero-length
                # lookback contributing 0 excess at full weight) — confirm
                # whether such keys should be skipped instead.
                k = _PERIOD_TO_DAYS.get(period, 0)
                if len(closes) <= k or len(kospi) <= k:
                    continue
                r_stock = closes.iloc[-1] / closes.iloc[-(k + 1)] - 1
                r_market = kospi.iloc[-1] / kospi.iloc[-(k + 1)] - 1
                total += w * (r_stock - r_market)
                wsum += w
            # NaN (→ neutral 50 below) when no period had enough history.
            return total / wsum if wsum else float("nan")

        raw = ordered.groupby("ticker", group_keys=False).apply(
            _excess_for_ticker, include_groups=False
        )
        return percentile_rank(raw).fillna(50.0)
|
||||
40
stock/app/screener/nodes/vcp_lite.py
Normal file
40
stock/app/screener/nodes/vcp_lite.py
Normal file
@@ -0,0 +1,40 @@
|
||||
"""VCP-lite — 단기/장기 일중 변동성 비율 기반 수축률."""
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from .base import ScoreNode, percentile_rank
|
||||
|
||||
|
||||
class VcpLite(ScoreNode):
    """Volatility contraction: 1 - (short-window / long-window mean intraday range)."""

    name = "vcp_lite"
    label = "VCP-lite (변동성 수축)"
    default_params = {"short_window": 40, "long_window": 252}
    param_schema = {
        "type": "object",
        "properties": {
            "short_window": {"type": "integer", "minimum": 10, "maximum": 120, "default": 40},
            "long_window": {"type": "integer", "minimum": 60, "maximum": 504, "default": 252},
        },
    }

    def compute(self, ctx, params: dict) -> pd.Series:
        short_w = int(params.get("short_window", 40))
        long_w = int(params.get("long_window", 252))
        prices = ctx.prices
        if prices.empty:
            return pd.Series(dtype=float)

        # Intraday range as a fraction of the close, per bar.
        ordered = prices.sort_values("date").copy()
        ordered["range_pct"] = (ordered["high"] - ordered["low"]) / ordered["close"]

        def _ratio(s: pd.Series) -> float:
            # NaN when history is too short or the long-window mean is unusable;
            # positive values mean recent volatility contracted vs the long base.
            if len(s) < long_w:
                return float("nan")
            short_vol = s.tail(short_w).mean()
            long_vol = s.tail(long_w).mean()
            if long_vol == 0 or pd.isna(long_vol):
                return float("nan")
            return 1 - (short_vol / long_vol)

        raw = ordered.groupby("ticker", group_keys=False)["range_pct"].apply(_ratio)
        return percentile_rank(raw).fillna(50.0)
|
||||
40
stock/app/screener/nodes/volume_surge.py
Normal file
40
stock/app/screener/nodes/volume_surge.py
Normal file
@@ -0,0 +1,40 @@
|
||||
"""거래량 급증 — log1p(recent/baseline)."""
|
||||
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
from .base import ScoreNode, percentile_rank
|
||||
|
||||
|
||||
class VolumeSurge(ScoreNode):
    """Volume surge score — log1p(recent mean volume / baseline mean volume)."""

    name = "volume_surge"
    label = "거래량 급증"
    default_params = {"baseline_days": 20, "eval_days": 3}
    param_schema = {
        "type": "object",
        "properties": {
            "baseline_days": {"type": "integer", "minimum": 5, "maximum": 60, "default": 20},
            "eval_days": {"type": "integer", "minimum": 1, "maximum": 10, "default": 3},
        },
    }

    def compute(self, ctx, params: dict) -> pd.Series:
        baseline = int(params.get("baseline_days", 20))
        eval_d = int(params.get("eval_days", 3))
        prices = ctx.prices
        if prices.empty:
            return pd.Series(dtype=float)

        ordered = prices.sort_values("date")
        # Mean volume over the last eval_d sessions per ticker.
        last_recent = ordered.groupby("ticker").tail(eval_d).groupby("ticker")["volume"].mean()
        # Baseline: first `baseline` rows of the last (baseline + eval_d) rows,
        # i.e. the window immediately preceding the eval window.
        # NOTE(review): for tickers with fewer than baseline+eval_d rows the
        # baseline window overlaps the eval window — confirm this is acceptable.
        last_baseline = (
            ordered.groupby("ticker")
            .tail(baseline + eval_d)
            .groupby("ticker")
            .head(baseline)
            .groupby("ticker")["volume"]
            .mean()
        )
        # Zero baselines become NA (→ neutral 50 after ranking) instead of inf.
        ratio = last_recent / last_baseline.replace(0, pd.NA)
        raw = np.log1p(ratio.astype(float))
        return percentile_rank(raw).fillna(50.0)
|
||||
51
stock/app/screener/position_sizer.py
Normal file
51
stock/app/screener/position_sizer.py
Normal file
@@ -0,0 +1,51 @@
|
||||
"""ATR Wilder smoothing + entry/stop/target 계산."""
|
||||
|
||||
import pandas as pd
|
||||
|
||||
|
||||
def compute_atr_wilder(df_one_ticker: pd.DataFrame, window: int = 14) -> float:
    """Last Wilder-smoothed ATR value for a single ticker's OHLC frame.

    Expects columns date/high/low/close; rows may arrive unsorted.
    """
    bars = df_one_ticker.sort_values("date").copy()
    high = bars["high"].astype(float)
    low = bars["low"].astype(float)
    close = bars["close"].astype(float)
    prior_close = close.shift(1)
    # True range = max(high-low, |high-prev_close|, |low-prev_close|) per bar.
    candidates = pd.concat(
        [high - low, (high - prior_close).abs(), (low - prior_close).abs()],
        axis=1,
    )
    true_range = candidates.max(axis=1)
    # Wilder smoothing is an EMA with alpha = 1/window (adjust=False).
    smoothed = true_range.ewm(alpha=1 / window, adjust=False).mean()
    return float(smoothed.iloc[-1])
|
||||
|
||||
|
||||
def round_won(x: float) -> int:
    """Round *x* to the nearest whole won (Python round(), i.e. banker's rounding)."""
    nearest = round(x)
    return int(nearest)
|
||||
|
||||
|
||||
def plan_positions(ctx, tickers: list, params: dict) -> dict:
    """Return entry/stop/target/atr14/r_pct per ticker.

    Args:
        ctx: screen context exposing a `prices` DataFrame (ticker/date/OHLC).
        tickers: tickers to size; ones with no price rows are silently skipped.
        params: atr_window (int), atr_stop_mult (float), rr_ratio (float).
    """
    atr_window = int(params.get("atr_window", 14))
    stop_mult = float(params.get("atr_stop_mult", 2.0))
    rr = float(params.get("rr_ratio", 2.0))

    prices = ctx.prices.sort_values("date")
    out: dict = {}
    for t in tickers:
        sub = prices[prices["ticker"] == t]
        if sub.empty:
            continue
        close = float(sub["close"].iloc[-1])
        atr14 = compute_atr_wilder(sub, window=atr_window)
        # Entry: 0.5% above the last close; stop: stop_mult ATRs below the close;
        # target: entry plus rr × the entry-to-stop risk (all rounded to whole won).
        entry = round_won(close * 1.005)
        stop = round_won(close - stop_mult * atr14)
        target = round_won(entry + rr * (entry - stop))
        # Risk per share as a percentage of the entry price (0.0 guards entry == 0).
        r_pct = (entry - stop) / entry * 100 if entry else 0.0
        out[t] = {
            "entry_price": entry,
            "stop_price": stop,
            "target_price": target,
            "atr14": atr14,
            "r_pct": r_pct,
        }
    return out
|
||||
26
stock/app/screener/registry.py
Normal file
26
stock/app/screener/registry.py
Normal file
@@ -0,0 +1,26 @@
|
||||
"""Registry of node classes (single source of truth for /nodes endpoint)."""
|
||||
|
||||
from .nodes.hygiene import HygieneGate
|
||||
from .nodes.foreign_buy import ForeignBuy
|
||||
from .nodes.volume_surge import VolumeSurge
|
||||
from .nodes.momentum import Momentum20
|
||||
from .nodes.high52w import High52WProximity
|
||||
from .nodes.rs_rating import RsRating
|
||||
from .nodes.ma_alignment import MaAlignment
|
||||
from .nodes.vcp_lite import VcpLite
|
||||
from .nodes.ai_news import AiNewsSentiment
|
||||
|
||||
# Score nodes: factor scorers whose weighted sum produces the ranking.
# Keys are the public node names referenced by the screener settings
# (weights_json / node_params_json) and exposed via GET /nodes.
NODE_REGISTRY: dict = {
    "foreign_buy": ForeignBuy,
    "volume_surge": VolumeSurge,
    "momentum": Momentum20,
    "high52w": High52WProximity,
    "rs_rating": RsRating,
    "ma_alignment": MaAlignment,
    "vcp_lite": VcpLite,
    "ai_news": AiNewsSentiment,
}

# Gate nodes: hard pass/fail filters applied before scoring.
GATE_REGISTRY: dict = {
    "hygiene": HygieneGate,
}
|
||||
371
stock/app/screener/router.py
Normal file
371
stock/app/screener/router.py
Normal file
@@ -0,0 +1,371 @@
|
||||
"""FastAPI router for /api/stock/screener/*"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import datetime as dt
|
||||
import json
|
||||
import os
|
||||
import sqlite3
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import APIRouter, HTTPException
|
||||
|
||||
from . import schemas
|
||||
from .registry import NODE_REGISTRY, GATE_REGISTRY
|
||||
|
||||
|
||||
router = APIRouter(prefix="/api/stock/screener")
|
||||
|
||||
|
||||
import json as _json
|
||||
import pathlib as _pathlib
|
||||
|
||||
_HOLIDAYS_CACHE = None  # lazily-populated set of ISO date strings


def _holidays():
    """Load holidays.json (app directory) once into a module-level set.

    Accepts either a JSON list of date strings or an object keyed by date.
    A missing file degrades to an empty set (weekend-only skipping).
    """
    global _HOLIDAYS_CACHE
    if _HOLIDAYS_CACHE is not None:
        return _HOLIDAYS_CACHE
    holidays_file = _pathlib.Path(__file__).resolve().parent.parent / "holidays.json"
    try:
        with holidays_file.open(encoding="utf-8") as fh:
            loaded = _json.load(fh)
    except FileNotFoundError:
        _HOLIDAYS_CACHE = set()
    else:
        _HOLIDAYS_CACHE = set(loaded if isinstance(loaded, list) else loaded.keys())
    return _HOLIDAYS_CACHE
|
||||
|
||||
|
||||
def _is_holiday(d: dt.date) -> bool:
    """True on weekends (Sat/Sun) or on dates listed in holidays.json."""
    if d.weekday() >= 5:
        return True
    return d.isoformat() in _holidays()
|
||||
|
||||
|
||||
def _db_path() -> str:
    """SQLite file location; STOCK_DB_PATH overrides the container default."""
    default_path = "/app/data/stock.db"
    return os.environ.get("STOCK_DB_PATH", default_path)
|
||||
|
||||
|
||||
def _conn() -> sqlite3.Connection:
    """Open the screener DB in WAL mode with a generous busy timeout.

    WAL only removes reader-vs-writer contention; writers remain serialized,
    so the 120 s busy timeout must outlast the slowest write path (naver
    scraping ~20 s plus the DB upsert in snapshot/refresh).
    """
    connection = sqlite3.connect(_db_path(), timeout=120.0)
    connection.execute("PRAGMA journal_mode=WAL")
    connection.execute("PRAGMA busy_timeout=120000")
    return connection
|
||||
|
||||
|
||||
# ---------- /nodes ----------
|
||||
|
||||
@router.get("/nodes", response_model=schemas.NodesResponse)
def get_nodes():
    """Expose name/label/default-params/param-schema for every registered node.

    The registries are the single source of truth; clients build their
    weight/parameter editors from this response.
    """
    score_nodes = [
        schemas.NodeMeta(
            name=cls.name, label=cls.label,
            default_params=cls.default_params, param_schema=cls.param_schema,
        )
        for cls in NODE_REGISTRY.values()
    ]
    gate_nodes = [
        schemas.NodeMeta(
            name=cls.name, label=cls.label,
            default_params=cls.default_params, param_schema=cls.param_schema,
        )
        for cls in GATE_REGISTRY.values()
    ]
    return schemas.NodesResponse(score_nodes=score_nodes, gate_nodes=gate_nodes)
|
||||
|
||||
|
||||
# ---------- /settings ----------
|
||||
|
||||
@router.get("/settings", response_model=schemas.SettingsResponse)
def get_settings():
    """Return the singleton settings row (id=1), JSON columns decoded.

    Raises:
        HTTPException 503: when the row has not been seeded yet.
    """
    with _conn() as c:
        row = c.execute(
            "SELECT weights_json, node_params_json, gate_params_json, "
            "top_n, rr_ratio, atr_window, atr_stop_mult, updated_at "
            "FROM screener_settings WHERE id=1"
        ).fetchone()
    if row is None:
        raise HTTPException(503, "settings not initialized")
    return schemas.SettingsResponse(
        weights=json.loads(row[0]),
        node_params=json.loads(row[1]),
        gate_params=json.loads(row[2]),
        top_n=row[3], rr_ratio=row[4], atr_window=row[5], atr_stop_mult=row[6],
        updated_at=row[7],
    )
|
||||
|
||||
|
||||
@router.put("/settings", response_model=schemas.SettingsResponse)
def put_settings(body: schemas.SettingsBody):
    """Overwrite the singleton settings row (id=1) with *body*.

    The updated_at timestamp is a naive-UTC ISO string (utcnow) — the same
    convention used by the run endpoints.
    """
    now = dt.datetime.utcnow().isoformat()
    with _conn() as c:
        c.execute(
            """UPDATE screener_settings SET
                weights_json=?, node_params_json=?, gate_params_json=?,
                top_n=?, rr_ratio=?, atr_window=?, atr_stop_mult=?, updated_at=?
            WHERE id=1""",
            (
                json.dumps(body.weights), json.dumps(body.node_params),
                json.dumps(body.gate_params),
                body.top_n, body.rr_ratio, body.atr_window, body.atr_stop_mult, now,
            ),
        )
        c.commit()
    # Echo the accepted settings back with the new timestamp.
    return schemas.SettingsResponse(**body.model_dump(), updated_at=now)
|
||||
|
||||
|
||||
# ---------- /run ----------
|
||||
|
||||
from . import telegram as _tg
|
||||
from .engine import Screener, ScreenContext
|
||||
|
||||
|
||||
def _resolve_asof(asof_str, conn: sqlite3.Connection) -> dt.date:
    """Resolve the run date: explicit ISO string, else latest snapshot date.

    Raises:
        HTTPException 503: when no price snapshot exists yet.
    """
    if asof_str:
        return dt.date.fromisoformat(asof_str)
    row = conn.execute("SELECT max(date) FROM krx_daily_prices").fetchone()
    if not row or row[0] is None:
        raise HTTPException(503, "no snapshot available — run /snapshot/refresh first")
    return dt.date.fromisoformat(row[0])
|
||||
|
||||
|
||||
def _load_settings(conn) -> dict:
    """Read the singleton settings row (id=1) and decode its JSON columns."""
    (weights_json, node_params_json, gate_params_json,
     top_n, rr_ratio, atr_window, atr_stop_mult) = conn.execute(
        "SELECT weights_json,node_params_json,gate_params_json,top_n,"
        "rr_ratio,atr_window,atr_stop_mult FROM screener_settings WHERE id=1"
    ).fetchone()
    return {
        "weights": json.loads(weights_json),
        "node_params": json.loads(node_params_json),
        "gate_params": json.loads(gate_params_json),
        "top_n": top_n,
        "rr_ratio": rr_ratio,
        "atr_window": atr_window,
        "atr_stop_mult": atr_stop_mult,
    }
|
||||
|
||||
|
||||
def _persist_run(conn, asof, mode, weights, node_params, gate_params, top_n,
                 result, started_at, finished_at) -> int:
    """Insert one run header plus its ranked result rows; return the run id.

    *result* carries .survivors_count and .rows (list of dicts with the keys
    inserted below).  telegram_sent starts at 0 — delivery is tracked by the
    caller side.  Commits at the end so header+rows land atomically.
    """
    cur = conn.execute(
        """INSERT INTO screener_runs (asof,mode,status,started_at,finished_at,
        weights_json,node_params_json,gate_params_json,top_n,survivors_count,telegram_sent)
        VALUES (?,?,?,?,?,?,?,?,?,?,0)""",
        (asof.isoformat(), mode, "success", started_at, finished_at,
         json.dumps(weights), json.dumps(node_params), json.dumps(gate_params),
         top_n, result.survivors_count),
    )
    run_id = cur.lastrowid
    for row in result.rows:
        conn.execute(
            """INSERT INTO screener_results (run_id,rank,ticker,name,total_score,
            scores_json,close,market_cap,entry_price,stop_price,target_price,atr14)
            VALUES (?,?,?,?,?,?,?,?,?,?,?,?)""",
            (run_id, row["rank"], row["ticker"], row["name"], row["total_score"],
             json.dumps(row["scores"]), row["close"], row["market_cap"],
             row["entry_price"], row["stop_price"], row["target_price"], row["atr14"]),
        )
    conn.commit()
    return run_id
|
||||
|
||||
|
||||
@router.post("/run", response_model=schemas.RunResponse)
def post_run(body: schemas.RunRequest):
    """Execute one screener run.

    Modes:
      preview      — run with request overrides, do not persist
      manual_save  — run with request overrides, persist
      auto         — run with stored defaults, persist, skip on holidays

    Raises:
        HTTPException 422: when the Screener rejects its configuration.
        HTTPException 503: via _resolve_asof when no snapshot exists.
    """
    from .registry import NODE_REGISTRY as _NR, GATE_REGISTRY as _GR
    started_at = dt.datetime.utcnow().isoformat()
    with _conn() as c:
        asof = _resolve_asof(body.asof, c)

        # Holiday short-circuit applies only to scheduled (auto) runs;
        # manual runs are allowed to re-screen any date.
        if body.mode == "auto" and _is_holiday(asof):
            return schemas.RunResponse(
                asof=asof.isoformat(), mode="auto", status="skipped_holiday",
                run_id=None, survivors_count=None,
                weights={}, top_n=0,
                results=[], telegram_payload=None,
                warnings=[f"{asof.isoformat()} is a holiday — skipped"],
            )

        defaults = _load_settings(c)

        # auto: stored defaults only; otherwise per-field request overrides.
        if body.mode == "auto":
            weights = defaults["weights"]
            node_params = defaults["node_params"]
            gate_params = defaults["gate_params"]
            top_n = defaults["top_n"]
        else:
            weights = body.weights if body.weights is not None else defaults["weights"]
            node_params = body.node_params if body.node_params is not None else defaults["node_params"]
            gate_params = body.gate_params if body.gate_params is not None else defaults["gate_params"]
            top_n = body.top_n if body.top_n is not None else defaults["top_n"]

        # Position-sizing knobs always come from stored settings (not overridable here).
        sizer_params = {
            "atr_window": defaults["atr_window"],
            "atr_stop_mult": defaults["atr_stop_mult"],
            "rr_ratio": defaults["rr_ratio"],
        }

        ctx = ScreenContext.load(c, asof)
        # Instantiate only nodes with a positive weight — zero-weight nodes
        # (e.g. ai_news pre-validation) are skipped entirely.
        score_nodes = [cls() for name, cls in _NR.items() if weights.get(name, 0) > 0]
        gate = _GR["hygiene"]()

        try:
            screener = Screener(
                gate=gate, score_nodes=score_nodes, weights=weights,
                node_params=node_params, gate_params=gate_params,
                top_n=top_n, sizer_params=sizer_params,
            )
            result = screener.run(ctx)
        except ValueError as e:
            raise HTTPException(422, str(e))

        finished_at = dt.datetime.utcnow().isoformat()
        run_id = None
        if body.mode in ("manual_save", "auto"):
            run_id = _persist_run(c, asof, body.mode, weights, node_params, gate_params,
                                  top_n, result, started_at, finished_at)

        # Telegram payload is built for every mode; actual delivery is the
        # caller's (agent-office) responsibility.
        payload = _tg.build_telegram_payload(
            asof=asof, mode=body.mode, survivors_count=result.survivors_count,
            top_n=top_n, rows=result.rows, run_id=run_id,
        )

        return schemas.RunResponse(
            asof=asof.isoformat(), mode=body.mode, status="success",
            run_id=run_id, survivors_count=result.survivors_count,
            weights=weights, top_n=top_n,
            results=result.rows,
            telegram_payload=schemas.TelegramPayload(**payload),
            warnings=result.warnings,
        )
|
||||
|
||||
|
||||
# ---------- /snapshot/refresh ----------
|
||||
|
||||
from . import snapshot as _snap
|
||||
|
||||
|
||||
@router.post("/snapshot/refresh")
def post_snapshot_refresh(asof: Optional[str] = None):
    """Pull the daily KRX snapshot (master + prices + flow) into the DB.

    Skips weekends only — NOTE(review): unlike refresh-news-sentiment this
    does not skip holidays; confirm whether that asymmetry is intentional.
    """
    asof_date = dt.date.fromisoformat(asof) if asof else dt.date.today()
    if asof_date.weekday() >= 5:
        return {"asof": asof_date.isoformat(), "status": "skipped_weekend"}
    with _conn() as c:
        summary = _snap.refresh_daily(c, asof_date)
    return summary
|
||||
|
||||
|
||||
# ---------- /runs ----------
|
||||
|
||||
@router.get("/runs", response_model=list[schemas.RunSummary])
def list_runs(limit: int = 30):
    """List recent run headers (no result rows), newest first."""
    with _conn() as c:
        rows = c.execute(
            "SELECT id,asof,mode,status,started_at,finished_at,top_n,"
            "survivors_count,telegram_sent FROM screener_runs "
            "ORDER BY asof DESC, id DESC LIMIT ?", (limit,),
        ).fetchall()
    return [
        schemas.RunSummary(
            id=r[0], asof=r[1], mode=r[2], status=r[3],
            started_at=r[4], finished_at=r[5], top_n=r[6],
            survivors_count=r[7], telegram_sent=bool(r[8]),
        )
        for r in rows
    ]
|
||||
|
||||
|
||||
# ---------- /snapshot/refresh-news-sentiment ----------
|
||||
|
||||
from .ai_news import pipeline as _ai_pipeline
|
||||
from .ai_news import telegram as _ai_telegram
|
||||
from .ai_news import validation as _ai_validation
|
||||
|
||||
|
||||
@router.post("/snapshot/refresh-news-sentiment")
async def post_refresh_news_sentiment(asof: Optional[str] = None):
    """Run the AI news-sentiment pipeline for one date and build the telegram text.

    Skips weekends and holidays.  The pipeline summary is enriched with stock
    names for top_pos/top_neg entries (telegram readability) before the
    message body is rendered.
    """
    asof_date = dt.date.fromisoformat(asof) if asof else dt.date.today()
    if asof_date.weekday() >= 5:
        return {"asof": asof_date.isoformat(), "status": "skipped_weekend"}
    if _is_holiday(asof_date):
        return {"asof": asof_date.isoformat(), "status": "skipped_holiday"}
    with _conn() as c:
        summary = await _ai_pipeline.refresh_daily(c, asof_date)
        # Inject stock names into top_pos/top_neg entries (telegram readability).
        tickers = {r["ticker"] for r in summary["top_pos"] + summary["top_neg"]}
        if tickers:
            placeholders = ",".join("?" * len(tickers))
            name_map = {
                row[0]: row[1] for row in c.execute(
                    f"SELECT ticker, name FROM krx_master WHERE ticker IN ({placeholders})",
                    list(tickers),
                ).fetchall()
            }
            for r in summary["top_pos"] + summary["top_neg"]:
                r["name"] = name_map.get(r["ticker"], "")
        summary["telegram_text"] = _ai_telegram.build_message(
            asof=summary["asof"],
            top_pos=summary["top_pos"], top_neg=summary["top_neg"],
            tokens_input=summary["tokens_input"],
            tokens_output=summary["tokens_output"],
            mapping=summary.get("mapping"),
        )
    return summary
|
||||
|
||||
|
||||
# ---------- /ai-news/ic ----------
|
||||
|
||||
@router.get("/ai-news/ic")
def get_ai_news_ic(days: int = 30, horizon: int = 1, min_news_count: int = 1):
    """Compute the forward-return IC (Spearman) of ai_news.score_raw.

    verdict:
      - skip:   ic_count < 10 (insufficient data)
      - weak:   |ic_mean| <= 0.05
      - strong: |ic_mean| > 0.05 (worth enabling the ai_news weight)
    """
    with _conn() as c:
        return _ai_validation.compute_ic(
            c, days=days, horizon=horizon, min_news_count=min_news_count,
        )
|
||||
|
||||
|
||||
@router.get("/runs/{run_id}")
def get_run(run_id: int):
    """Return one persisted run: decoded header metadata plus ranked results.

    Raises:
        HTTPException 404: when no run with *run_id* exists.
    """
    with _conn() as c:
        meta = c.execute(
            "SELECT id,asof,mode,status,started_at,finished_at,top_n,"
            "survivors_count,telegram_sent,weights_json,node_params_json,gate_params_json "
            "FROM screener_runs WHERE id=?",
            (run_id,),
        ).fetchone()
        if not meta:
            raise HTTPException(404, "run not found")
        rows = c.execute(
            "SELECT rank,ticker,name,total_score,scores_json,close,market_cap,"
            "entry_price,stop_price,target_price,atr14 "
            "FROM screener_results WHERE run_id=? ORDER BY rank",
            (run_id,),
        ).fetchall()

    return {
        "meta": {
            "id": meta[0], "asof": meta[1], "mode": meta[2], "status": meta[3],
            "started_at": meta[4], "finished_at": meta[5], "top_n": meta[6],
            "survivors_count": meta[7], "telegram_sent": bool(meta[8]),
            "weights": json.loads(meta[9]),
            "node_params": json.loads(meta[10]),
            "gate_params": json.loads(meta[11]),
        },
        "results": [
            {
                "rank": r[0], "ticker": r[1], "name": r[2],
                "total_score": r[3], "scores": json.loads(r[4]),
                "close": r[5], "market_cap": r[6],
                "entry_price": r[7], "stop_price": r[8], "target_price": r[9],
                "atr14": r[10],
            }
            for r in rows
        ],
    }
|
||||
204
stock/app/screener/schema.py
Normal file
204
stock/app/screener/schema.py
Normal file
@@ -0,0 +1,204 @@
|
||||
"""Screener schema bootstrap. Called once at module import via db.py."""
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
from datetime import datetime, timezone
|
||||
|
||||
# Default node weights used to seed screener_settings (id=1).
DEFAULT_WEIGHTS = {
    "foreign_buy": 1.0,
    "volume_surge": 1.0,
    "momentum": 1.0,
    "high52w": 1.2,
    "rs_rating": 1.2,
    "ma_alignment": 1.0,
    "vcp_lite": 0.8,
    # ai_news: gradient blocked until validated (enable after 4-week IC > 0.05).
    # Data collection continues; only the weighted-sum contribution is 0.
    "ai_news": 0.0,
}
# Default per-node parameters (window sizes etc.), keyed by node name.
DEFAULT_NODE_PARAMS = {
    "foreign_buy": {"window_days": 5},
    "volume_surge": {"baseline_days": 20, "eval_days": 3},
    "momentum": {"window_days": 20},
    "high52w": {"window_days": 252},
    "rs_rating": {"weights": {"3m": 2, "6m": 1, "9m": 1, "12m": 1}},
    "ma_alignment": {"ma_periods": [50, 150, 200]},
    "vcp_lite": {"short_window": 40, "long_window": 252},
    "ai_news": {"min_news_count": 1},
}
# Default hygiene-gate thresholds (hard filters applied before scoring).
DEFAULT_GATE_PARAMS = {
    "min_market_cap_won": 50_000_000_000,
    "min_avg_value_won": 500_000_000,
    "min_listed_days": 60,
    "skip_managed": True,
    "skip_preferred": True,
    "skip_spac": True,
    "skip_halted_days": 3,
}
|
||||
|
||||
DDL = """
|
||||
CREATE TABLE IF NOT EXISTS krx_master (
|
||||
ticker TEXT PRIMARY KEY,
|
||||
name TEXT NOT NULL,
|
||||
market TEXT NOT NULL,
|
||||
market_cap INTEGER,
|
||||
is_managed INTEGER NOT NULL DEFAULT 0,
|
||||
is_preferred INTEGER NOT NULL DEFAULT 0,
|
||||
is_spac INTEGER NOT NULL DEFAULT 0,
|
||||
listed_date TEXT,
|
||||
updated_at TEXT NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS krx_daily_prices (
|
||||
ticker TEXT NOT NULL,
|
||||
date TEXT NOT NULL,
|
||||
open INTEGER, high INTEGER, low INTEGER, close INTEGER,
|
||||
volume INTEGER,
|
||||
value INTEGER,
|
||||
PRIMARY KEY (ticker, date)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_prices_date ON krx_daily_prices(date);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS krx_flow (
|
||||
ticker TEXT NOT NULL,
|
||||
date TEXT NOT NULL,
|
||||
foreign_net INTEGER,
|
||||
institution_net INTEGER,
|
||||
PRIMARY KEY (ticker, date)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_flow_date ON krx_flow(date);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS screener_settings (
|
||||
id INTEGER PRIMARY KEY CHECK (id = 1),
|
||||
weights_json TEXT NOT NULL,
|
||||
node_params_json TEXT NOT NULL,
|
||||
gate_params_json TEXT NOT NULL,
|
||||
top_n INTEGER NOT NULL DEFAULT 20,
|
||||
rr_ratio REAL NOT NULL DEFAULT 2.0,
|
||||
atr_window INTEGER NOT NULL DEFAULT 14,
|
||||
atr_stop_mult REAL NOT NULL DEFAULT 2.0,
|
||||
updated_at TEXT NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS screener_runs (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
asof TEXT NOT NULL,
|
||||
mode TEXT NOT NULL,
|
||||
status TEXT NOT NULL,
|
||||
error TEXT,
|
||||
started_at TEXT NOT NULL,
|
||||
finished_at TEXT,
|
||||
weights_json TEXT NOT NULL,
|
||||
node_params_json TEXT NOT NULL,
|
||||
gate_params_json TEXT NOT NULL,
|
||||
top_n INTEGER NOT NULL,
|
||||
survivors_count INTEGER,
|
||||
telegram_sent INTEGER NOT NULL DEFAULT 0
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_runs_asof ON screener_runs(asof DESC);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS screener_results (
|
||||
run_id INTEGER NOT NULL,
|
||||
rank INTEGER NOT NULL,
|
||||
ticker TEXT NOT NULL,
|
||||
name TEXT NOT NULL,
|
||||
total_score REAL NOT NULL,
|
||||
scores_json TEXT NOT NULL,
|
||||
close INTEGER,
|
||||
market_cap INTEGER,
|
||||
entry_price INTEGER,
|
||||
stop_price INTEGER,
|
||||
target_price INTEGER,
|
||||
atr14 REAL,
|
||||
PRIMARY KEY (run_id, ticker),
|
||||
FOREIGN KEY (run_id) REFERENCES screener_runs(id) ON DELETE CASCADE
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_results_run_rank ON screener_results(run_id, rank);
|
||||
|
||||
-- articles 테이블 (도메스틱/해외 뉴스 원본).
|
||||
-- 메인 app.db.init_db() 에서도 생성하지만, 테스트 환경 및 단독 screener 컨텍스트
|
||||
-- (ai_news.articles_source 등)에서도 참조 가능하도록 idempotent 하게 보장한다.
|
||||
CREATE TABLE IF NOT EXISTS articles (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
hash TEXT UNIQUE NOT NULL,
|
||||
category TEXT DEFAULT 'domestic',
|
||||
title TEXT NOT NULL,
|
||||
link TEXT,
|
||||
summary TEXT,
|
||||
press TEXT,
|
||||
pub_date TEXT,
|
||||
crawled_at TEXT
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_articles_crawled ON articles(crawled_at DESC);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS news_sentiment (
|
||||
ticker TEXT NOT NULL,
|
||||
date TEXT NOT NULL,
|
||||
score_raw REAL NOT NULL,
|
||||
reason TEXT NOT NULL DEFAULT '',
|
||||
news_count INTEGER NOT NULL DEFAULT 0,
|
||||
tokens_input INTEGER NOT NULL DEFAULT 0,
|
||||
tokens_output INTEGER NOT NULL DEFAULT 0,
|
||||
model TEXT NOT NULL DEFAULT 'claude-haiku-4-5-20251001',
|
||||
source TEXT NOT NULL DEFAULT 'articles',
|
||||
created_at TEXT NOT NULL DEFAULT (datetime('now','localtime')),
|
||||
PRIMARY KEY (ticker, date)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_news_sentiment_date ON news_sentiment(date DESC);
|
||||
"""
|
||||
|
||||
|
||||
def ensure_screener_schema(conn: sqlite3.Connection) -> None:
    """Create tables and seed default settings (idempotent).

    Order matters: DDL first, then small in-place migrations for already-
    running deployments, then first-time seeding of the settings row.
    """
    conn.executescript(DDL)
    # One-time backfill of the ai_news key for deployments created before it existed.
    row = conn.execute(
        "SELECT weights_json, node_params_json FROM screener_settings WHERE id=1"
    ).fetchone()
    if row is not None:
        w = json.loads(row[0])
        p = json.loads(row[1])
        changed = False
        if "ai_news" not in w:
            w["ai_news"] = DEFAULT_WEIGHTS["ai_news"]
            changed = True
        # One-time reset: ai_news default 0.8 → 0.0 (block gradient until
        # validated).  Users who explicitly set any value other than 0.8
        # are unaffected.
        elif w.get("ai_news") == 0.8:
            w["ai_news"] = 0.0
            changed = True
        if "ai_news" not in p:
            p["ai_news"] = DEFAULT_NODE_PARAMS["ai_news"]
            changed = True
        if changed:
            conn.execute(
                "UPDATE screener_settings SET weights_json=?, node_params_json=? WHERE id=1",
                (json.dumps(w), json.dumps(p)),
            )
    # One-time addition of news_sentiment.source (pre-existing deployments).
    cols = {r[1] for r in conn.execute(
        "PRAGMA table_info(news_sentiment)"
    ).fetchall()}
    if "source" not in cols:
        conn.execute(
            "ALTER TABLE news_sentiment "
            "ADD COLUMN source TEXT NOT NULL DEFAULT 'articles'"
        )
    # Seed the singleton settings row on first boot.
    existing = conn.execute("SELECT id FROM screener_settings WHERE id=1").fetchone()
    if existing is None:
        now = datetime.now(timezone.utc).isoformat()
        conn.execute(
            """
            INSERT INTO screener_settings (
                id, weights_json, node_params_json, gate_params_json,
                top_n, rr_ratio, atr_window, atr_stop_mult, updated_at
            ) VALUES (1, ?, ?, ?, 20, 2.0, 14, 2.0, ?)
            """,
            (
                json.dumps(DEFAULT_WEIGHTS),
                json.dumps(DEFAULT_NODE_PARAMS),
                json.dumps(DEFAULT_GATE_PARAMS),
                now,
            ),
        )
    conn.commit()
|
||||
85
stock/app/screener/schemas.py
Normal file
85
stock/app/screener/schemas.py
Normal file
@@ -0,0 +1,85 @@
|
||||
from __future__ import annotations
|
||||
from typing import Literal, Optional
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class NodeMeta(BaseModel):
    """Metadata for one screener node, as served by GET /nodes."""
    name: str
    label: str
    default_params: dict
    param_schema: dict
|
||||
|
||||
|
||||
class NodesResponse(BaseModel):
    """GET /nodes payload: available score nodes and gate nodes."""
    score_nodes: list[NodeMeta]
    gate_nodes: list[NodeMeta]
|
||||
|
||||
|
||||
class SettingsBody(BaseModel):
    """PUT /settings request: node weights, per-node params, gate params,
    and position-sizing knobs (top_n / rr_ratio / ATR settings)."""
    weights: dict[str, float]
    node_params: dict[str, dict] = Field(default_factory=dict)
    gate_params: dict
    top_n: int = 20
    rr_ratio: float = 2.0
    atr_window: int = 14
    atr_stop_mult: float = 2.0
|
||||
|
||||
|
||||
class SettingsResponse(SettingsBody):
    """Settings echoed back with the persistence timestamp (ISO string)."""
    updated_at: str
|
||||
|
||||
|
||||
class RunRequest(BaseModel):
    """POST /run request.  All optional fields fall back to stored settings;
    in mode='auto' the stored settings are always used."""
    mode: Literal["preview", "manual_save", "auto"] = "preview"
    asof: Optional[str] = None
    weights: Optional[dict[str, float]] = None
    node_params: Optional[dict[str, dict]] = None
    gate_params: Optional[dict] = None
    top_n: Optional[int] = None
|
||||
|
||||
|
||||
class ResultRow(BaseModel):
    """One ranked screener result, including position-sizing levels
    (entry/stop/target in won; atr14 and r_pct may be absent)."""
    rank: int
    ticker: str
    name: str
    total_score: float
    scores: dict[str, float]
    close: int
    market_cap: int
    entry_price: Optional[int] = None
    stop_price: Optional[int] = None
    target_price: Optional[int] = None
    atr14: Optional[float] = None
    r_pct: Optional[float] = None
|
||||
|
||||
|
||||
class TelegramPayload(BaseModel):
    """Ready-to-send telegram message; delivery is handled by the caller."""
    chat_target: str
    parse_mode: str
    text: str
|
||||
|
||||
|
||||
class RunResponse(BaseModel):
    """POST /run response.  On status='skipped_holiday' the run_id,
    survivors_count and results are empty/None."""
    asof: str
    mode: str
    status: Literal["success", "failed", "skipped_holiday"]
    run_id: Optional[int] = None
    survivors_count: Optional[int] = None
    weights: dict[str, float]
    top_n: int
    results: list[ResultRow] = Field(default_factory=list)
    telegram_payload: Optional[TelegramPayload] = None
    warnings: list[str] = Field(default_factory=list)
    error: Optional[str] = None
|
||||
|
||||
|
||||
class RunSummary(BaseModel):
    """Run header for GET /runs listings (no result rows)."""
    id: int
    asof: str
    mode: str
    status: str
    started_at: str
    finished_at: Optional[str] = None
    top_n: int
    survivors_count: Optional[int] = None
    telegram_sent: bool
|
||||
250
stock/app/screener/snapshot.py
Normal file
250
stock/app/screener/snapshot.py
Normal file
@@ -0,0 +1,250 @@
|
||||
"""KRX daily snapshot loader (FDR + naver finance scraping)."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import datetime as dt
|
||||
import logging
|
||||
import re
|
||||
import sqlite3
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
|
||||
import FinanceDataReader as fdr
|
||||
import httpx
|
||||
import pandas as pd
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
log = logging.getLogger(__name__)

# Naver Finance per-ticker foreign/institution trading page (scraped daily).
NAVER_FRGN_URL = "https://finance.naver.com/item/frgn.naver"
# Browser-like headers — presumably required to avoid bot blocking; not verified.
NAVER_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
    "Referer": "https://finance.naver.com/",
}

DEFAULT_FLOW_TOP_N = 100
DEFAULT_RATE_LIMIT_SEC = 0.2
# Top 100 by market cap × 0.2 s ≈ 20 s — comfortably within agent-office's
# 180 s httpx timeout.  The foreign-buy signal matters most for large caps,
# so the top 100 suffices; split into a separate cron if more are needed.
|
||||
|
||||
|
||||
@dataclass
class RefreshSummary:
    """Row counts produced by one daily snapshot refresh."""

    asof: dt.date
    master_count: int
    prices_count: int
    flow_count: int
    failures: list[str]

    def asdict(self) -> dict:
        """JSON-serializable form; *asof* rendered as an ISO date string."""
        payload: dict = {"asof": self.asof.isoformat()}
        for field_name in ("master_count", "prices_count", "flow_count", "failures"):
            payload[field_name] = getattr(self, field_name)
        return payload
|
||||
|
||||
|
||||
def _iso(d: dt.date) -> str:
    """ISO-8601 (YYYY-MM-DD) rendering of *d*."""
    return d.isoformat()
|
||||
|
||||
|
||||
def _is_preferred(name: str) -> int:
    """Preferred-share heuristic: name ends with '우' plus an optional
    trailing uppercase letter or digit.  Returns 1/0 for SQLite storage.
    """
    match = re.search(r"우[A-Z]?$|우\d?$", name or "")
    return int(match is not None)
|
||||
|
||||
|
||||
def _is_spac(name: str) -> int:
    """SPAC heuristic: 1 when the name contains '스팩', else 0."""
    return int("스팩" in (name or ""))
|
||||
|
||||
|
||||
def fetch_master_listing() -> pd.DataFrame:
    """fdr.StockListing('KRX'). Wrapped so tests can stub the network call."""
    return fdr.StockListing("KRX")
|
||||
|
||||
|
||||
def fetch_ohlcv_for_ticker(ticker: str, start: str, end: str) -> pd.DataFrame:
    """fdr.DataReader for a single ticker over [start, end] — used by backfill.
    Wrapped so tests can stub the network call."""
    return fdr.DataReader(ticker, start, end)
|
||||
|
||||
|
||||
def fetch_flow_naver(ticker: str, *, client) -> dict | None:
    """Scrape naver frgn page; return latest-day flow dict, or None.

    Scans table.type2 rows for the first cell matching YYYY.MM.DD — that is
    the most recent trading day.  Column indices 5/6 are institution/foreign
    net volume in naver's current markup — verify if naver changes layout.
    """
    r = client.get(NAVER_FRGN_URL, params={"code": ticker, "page": 1})
    if r.status_code != 200:
        return None
    soup = BeautifulSoup(r.text, "lxml")
    for row in soup.select("table.type2 tr"):
        # Strip thousands separators so int() parses directly.
        cells = [c.get_text(strip=True).replace(",", "") for c in row.select("td")]
        if not cells or not cells[0]:
            continue
        if not re.match(r"\d{4}\.\d{2}\.\d{2}", cells[0]):
            continue
        try:
            # "" or "-" means no reported volume → treat as 0.
            inst = int(cells[5]) if cells[5] not in ("", "-") else 0
            foreign = int(cells[6]) if cells[6] not in ("", "-") else 0
            return {
                "date": cells[0].replace(".", "-"),  # 2024.01.02 → 2024-01-02
                "foreign_net": foreign,
                "institution_net": inst,
            }
        except (IndexError, ValueError):
            return None
    return None
|
||||
|
||||
|
||||
def _master_and_prices_rows(asof: dt.date,
|
||||
df: pd.DataFrame) -> tuple[list[tuple], list[tuple]]:
|
||||
iso = _iso(asof)
|
||||
now_iso = dt.datetime.utcnow().isoformat()
|
||||
master_rows: list[tuple] = []
|
||||
price_rows: list[tuple] = []
|
||||
for _, row in df.iterrows():
|
||||
ticker = str(row.get("Code") or "").strip()
|
||||
name = str(row.get("Name") or "").strip()
|
||||
if not ticker or not name:
|
||||
continue
|
||||
market_raw = str(row.get("Market") or "").upper()
|
||||
market = "KOSDAQ" if "KOSDAQ" in market_raw else "KOSPI"
|
||||
try:
|
||||
market_cap = int(row["Marcap"]) if pd.notna(row.get("Marcap")) else None
|
||||
except (TypeError, ValueError):
|
||||
market_cap = None
|
||||
master_rows.append((
|
||||
ticker, name, market, market_cap,
|
||||
0, _is_preferred(name), _is_spac(name),
|
||||
None, now_iso,
|
||||
))
|
||||
try:
|
||||
o = int(row["Open"]) if pd.notna(row.get("Open")) else None
|
||||
h = int(row["High"]) if pd.notna(row.get("High")) else None
|
||||
l = int(row["Low"]) if pd.notna(row.get("Low")) else None
|
||||
c = int(row["Close"]) if pd.notna(row.get("Close")) else None
|
||||
v = int(row["Volume"]) if pd.notna(row.get("Volume")) else None
|
||||
amt = row.get("Amount")
|
||||
a = int(amt) if pd.notna(amt) else None
|
||||
if c is not None and v is not None:
|
||||
price_rows.append((ticker, iso, o, h, l, c, v, a))
|
||||
except (TypeError, KeyError):
|
||||
pass
|
||||
return master_rows, price_rows
|
||||
|
||||
|
||||
def _gather_flow_naver(asof: dt.date, tickers: list[str],
                       *, rate_limit_sec: float = DEFAULT_RATE_LIMIT_SEC) -> list[tuple]:
    """Scrape naver foreign/institution flow for *tickers*.

    Only rows dated exactly *asof* are kept (the scraper returns the latest
    trading day, which may differ).  Per-ticker failures are logged and
    skipped; a sleep of *rate_limit_sec* separates requests.
    """
    target_iso = _iso(asof)
    collected: list[tuple] = []
    if not tickers:
        return collected
    with httpx.Client(timeout=10, headers=NAVER_HEADERS) as client:
        for ticker in tickers:
            try:
                flow = fetch_flow_naver(ticker, client=client)
                if flow and flow["date"] == target_iso:
                    collected.append(
                        (ticker, target_iso, flow["foreign_net"], flow["institution_net"])
                    )
            except Exception as exc:
                log.warning("flow scrape failed for %s: %s", ticker, exc)
            if rate_limit_sec > 0:
                time.sleep(rate_limit_sec)
    return collected
|
||||
|
||||
|
||||
def refresh_daily(conn: sqlite3.Connection, asof: dt.date,
                  flow_top_n: int = DEFAULT_FLOW_TOP_N,
                  rate_limit_sec: float = DEFAULT_RATE_LIMIT_SEC) -> dict:
    """Pull master + prices (FDR) + flow (naver scraping for top N by market cap).

    Upserts krx_master (preserving unknown fields via targeted ON CONFLICT),
    replaces today's krx_daily_prices rows, then scrapes foreign/institution
    flow for the *flow_top_n* largest caps only (rate-limit protection).
    Commits once at the end; returns a RefreshSummary dict.
    """
    df = fetch_master_listing()
    master_rows, price_rows = _master_and_prices_rows(asof, df)

    conn.executemany("""
        INSERT INTO krx_master (
            ticker, name, market, market_cap,
            is_managed, is_preferred, is_spac,
            listed_date, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(ticker) DO UPDATE SET
            name=excluded.name, market=excluded.market,
            market_cap=excluded.market_cap,
            is_managed=excluded.is_managed,
            is_preferred=excluded.is_preferred,
            is_spac=excluded.is_spac,
            updated_at=excluded.updated_at
    """, master_rows)
    conn.executemany("""
        INSERT OR REPLACE INTO krx_daily_prices
        (ticker, date, open, high, low, close, volume, value)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?)
    """, price_rows)

    # Foreign/institution flow: top-N by market cap only (rate-limit protection).
    if flow_top_n > 0:
        # master_rows[i][3] is market_cap (None → treated as 0 for ranking).
        top = sorted(master_rows, key=lambda r: r[3] or 0, reverse=True)[:flow_top_n]
        flow_tickers = [r[0] for r in top]
    else:
        flow_tickers = []
    flow_rows = _gather_flow_naver(asof, flow_tickers, rate_limit_sec=rate_limit_sec)
    conn.executemany("""
        INSERT OR REPLACE INTO krx_flow
        (ticker, date, foreign_net, institution_net)
        VALUES (?, ?, ?, ?)
    """, flow_rows)
    conn.commit()

    return RefreshSummary(
        asof=asof, master_count=len(master_rows),
        prices_count=len(price_rows), flow_count=len(flow_rows),
        failures=[],
    ).asdict()
|
||||
|
||||
|
||||
def backfill(conn: sqlite3.Connection, start: dt.date, end: dt.date) -> list[dict]:
    """Backfill ~5 years of daily candles, one fdr.DataReader call per ticker.

    The master listing is taken as of *end* (FDR exposes no historical
    master data). Per-ticker failures are logged and recorded in the
    result list instead of aborting the whole run.
    """
    listing = fetch_master_listing()
    master_rows, _ = _master_and_prices_rows(end, listing)
    conn.executemany("""
        INSERT INTO krx_master (
            ticker, name, market, market_cap,
            is_managed, is_preferred, is_spac,
            listed_date, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(ticker) DO UPDATE SET name=excluded.name
    """, master_rows)

    start_iso = start.isoformat()
    end_iso = end.isoformat()

    def _price_tuples(ticker: str, frame) -> list:
        # Convert an OHLCV frame to krx_daily_prices tuples; rows missing a
        # usable close or volume are dropped.
        frame = frame.reset_index()
        frame["Date"] = pd.to_datetime(frame["Date"]).dt.strftime("%Y-%m-%d")
        tuples = []
        for _, row in frame.iterrows():
            if pd.isna(row["Close"]) or pd.isna(row["Volume"]):
                continue
            tuples.append((
                ticker, row["Date"],
                int(row["Open"]) if pd.notna(row["Open"]) else None,
                int(row["High"]) if pd.notna(row["High"]) else None,
                int(row["Low"]) if pd.notna(row["Low"]) else None,
                int(row["Close"]),
                int(row["Volume"]),
                int(row["Close"] * row["Volume"]),  # approximate traded value
            ))
        return tuples

    results = []
    for master in master_rows:
        ticker = master[0]
        try:
            ohlcv = fetch_ohlcv_for_ticker(ticker, start_iso, end_iso)
            if ohlcv is None or ohlcv.empty:
                continue
            rows = _price_tuples(ticker, ohlcv)
            conn.executemany("""
                INSERT OR REPLACE INTO krx_daily_prices
                (ticker, date, open, high, low, close, volume, value)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?)
            """, rows)
            results.append({"ticker": ticker, "count": len(rows)})
        except Exception as e:
            log.error("backfill failed for %s: %s", ticker, e)
            results.append({"ticker": ticker, "error": str(e)})
    conn.commit()
    return results
|
||||
82
stock/app/screener/telegram.py
Normal file
82
stock/app/screener/telegram.py
Normal file
@@ -0,0 +1,82 @@
|
||||
"""Telegram payload builder. Caller (agent-office) handles actual delivery."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import datetime as dt
|
||||
|
||||
# Full per-node labels (used instead of icons — users preferred explicit names).
NODE_LABELS = {
    "foreign_buy": "외국인",
    "volume_surge": "거래량급증",
    "momentum": "20일모멘텀",
    "high52w": "52주신고가",
    "rs_rating": "RS레이팅",
    "ma_alignment": "이평선정배열",
    "vcp_lite": "VCP수축",
}

# Base URL of the screener results page linked from the Telegram message.
PAGE_BASE = "https://gahusb.synology.me/stock/screener"
|
||||
|
||||
|
||||
def _escape_md(s: str) -> str:
|
||||
"""Minimal MarkdownV2 escape — extend if formatting breaks."""
|
||||
for ch in r"\_*[]()~`>#+-=|{}.!":
|
||||
s = s.replace(ch, "\\" + ch)
|
||||
return s
|
||||
|
||||
|
||||
def _format_won(n) -> str:
|
||||
"""1,234,567원 형태 (None 시 '-')."""
|
||||
if n is None:
|
||||
return "\\-"
|
||||
return f"{int(n):,}원"
|
||||
|
||||
|
||||
def _format_active_nodes(scores: dict, threshold: int = 70) -> str:
    """Render nodes scoring at/above *threshold* as 'label score' pairs, dot-joined."""
    rendered = []
    for node_name, node_score in scores.items():
        label = NODE_LABELS.get(node_name)
        if label is None or node_score < threshold:
            continue  # unknown node, or below the reporting threshold
        rendered.append(f"{_escape_md(label)} {int(node_score)}")
    if not rendered:
        return "\\(70점 이상 노드 없음\\)"
    return " · ".join(rendered)
|
||||
|
||||
|
||||
def build_telegram_payload(asof: dt.date, mode: str, survivors_count: int,
                           top_n: int, rows: list, run_id) -> dict:
    """Build the MarkdownV2 Telegram message for a screener run.

    Returns a dict with chat_target / parse_mode / text; the caller
    (agent-office) performs the actual delivery. Only the top 10 rows go
    into the message body; the rest are reachable via the results link.
    """
    title = "*KRX 강세주 스크리너*"
    header = (
        f"🎯 {title} — {_escape_md(asof.isoformat())} \\({_escape_md(mode)}\\)\n"
        f"통과 {survivors_count}종 / Top {top_n} / 본문 1\\-10"
    )

    # One multi-line entry per ranked row: rank/name/ticker/score, active
    # nodes, then entry/stop/target prices with the risk percentage.
    lines = []
    for r in rows[:10]:
        nodes_str = _format_active_nodes(r.get("scores", {}))
        score_str = f"{r['total_score']:.1f}"
        r_pct = r.get("r_pct")
        r_pct_str = f"{r_pct:.1f}" if r_pct is not None else "-"
        lines.append(
            f"{r['rank']}\\. *{_escape_md(r['name'])}* `{r['ticker']}` "
            f"⭐ {_escape_md(score_str)}\n"
            f"   {nodes_str}\n"
            f"   진입 {_format_won(r.get('entry_price'))} "
            f"손절 {_format_won(r.get('stop_price'))} "
            f"익절 {_format_won(r.get('target_price'))} "
            f"\\(R {_escape_md(r_pct_str)}%\\)"
        )

    # Wrap the URL in an inline link so '.', '-', '?', '=' inside the URL
    # need no MarkdownV2 escaping.
    link = (
        f"🔗 [전체 결과·11\\~20위]({PAGE_BASE}?run_id={run_id})"
        if run_id else ""
    )

    text = header + "\n\n" + "\n\n".join(lines) + ("\n\n" + link if link else "")

    return {
        "chat_target": "default",
        "parse_mode": "MarkdownV2",
        "text": text,
    }
|
||||
131
stock/app/test_price_fetcher.py
Normal file
131
stock/app/test_price_fetcher.py
Normal file
@@ -0,0 +1,131 @@
|
||||
"""price_fetcher._select_price_from_response 단위 테스트.
|
||||
|
||||
실행:
|
||||
cd web-backend/stock
|
||||
python -m unittest app.test_price_fetcher -v
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
|
||||
# app 패키지를 직접 실행 가능하도록
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from app.price_fetcher import _select_price_from_response
|
||||
|
||||
|
||||
class SelectPriceFromResponseTest(unittest.TestCase):
    """Unit tests for price_fetcher._select_price_from_response.

    Covers regular-session, NXT pre/after-market, halted, and missing-field
    payload shapes from the Naver price API.
    """

    def test_regular_session_uses_close_price(self):
        """While the regular session is open, closePrice is returned as REGULAR."""
        payload = {
            "closePrice": "70,500",
            "marketStatus": "OPEN",
            "localTradedAt": "2026-05-11T11:23:45+09:00",
            "overMarketPriceInfo": None,
        }
        result = _select_price_from_response(payload)
        self.assertEqual(result["price"], 70500)
        self.assertEqual(result["session"], "REGULAR")
        self.assertEqual(result["as_of"], "2026-05-11T11:23:45+09:00")

    def test_nxt_after_market_open_uses_over_price(self):
        """Regular session closed + NXT after-market open → overPrice as NXT_AFTER."""
        payload = {
            "closePrice": "285,500",
            "marketStatus": "CLOSE",
            "localTradedAt": "2026-05-11T15:30:00+09:00",
            "overMarketPriceInfo": {
                "tradingSessionType": "AFTER_MARKET",
                "overMarketStatus": "OPEN",
                "overPrice": "285,000",
                "localTradedAt": "2026-05-11T19:21:40+09:00",
                "tradeStopType": {"name": "TRADING"},
            },
        }
        result = _select_price_from_response(payload)
        self.assertEqual(result["price"], 285000)
        self.assertEqual(result["session"], "NXT_AFTER")
        self.assertEqual(result["as_of"], "2026-05-11T19:21:40+09:00")

    def test_nxt_pre_market_open_uses_over_price(self):
        """NXT pre-market open → NXT_PRE session with overPrice."""
        payload = {
            "closePrice": "70,500",
            "marketStatus": "CLOSE",
            "localTradedAt": "2026-05-10T15:30:00+09:00",
            "overMarketPriceInfo": {
                "tradingSessionType": "PRE_MARKET",
                "overMarketStatus": "OPEN",
                "overPrice": "70,800",
                "localTradedAt": "2026-05-11T08:30:00+09:00",
                "tradeStopType": {"name": "TRADING"},
            },
        }
        result = _select_price_from_response(payload)
        self.assertEqual(result["price"], 70800)
        self.assertEqual(result["session"], "NXT_PRE")
        self.assertEqual(result["as_of"], "2026-05-11T08:30:00+09:00")

    def test_nxt_closed_falls_back_to_close_price(self):
        """When NXT is CLOSE, fall back to closePrice; session is CLOSED."""
        payload = {
            "closePrice": "285,500",
            "marketStatus": "CLOSE",
            "localTradedAt": "2026-05-11T15:30:00+09:00",
            "overMarketPriceInfo": {
                "tradingSessionType": "AFTER_MARKET",
                "overMarketStatus": "CLOSE",
                "overPrice": "285,000",
                "tradeStopType": {"name": "TRADING"},
            },
        }
        result = _select_price_from_response(payload)
        self.assertEqual(result["price"], 285500)
        self.assertEqual(result["session"], "CLOSED")

    def test_nxt_trading_halted_falls_back_to_close_price(self):
        """NXT OPEN but tradeStopType is not TRADING → use closePrice."""
        payload = {
            "closePrice": "285,500",
            "marketStatus": "CLOSE",
            "overMarketPriceInfo": {
                "tradingSessionType": "AFTER_MARKET",
                "overMarketStatus": "OPEN",
                "overPrice": "285,000",
                "tradeStopType": {"name": "STOP"},
            },
        }
        result = _select_price_from_response(payload)
        self.assertEqual(result["price"], 285500)
        self.assertEqual(result["session"], "CLOSED")

    def test_no_over_market_info_returns_close_price(self):
        """No overMarketPriceInfo at all (e.g. foreign listings) → closePrice as-is."""
        payload = {
            "closePrice": "150,000",
            "marketStatus": "CLOSE",
            "localTradedAt": "2026-05-11T15:30:00+09:00",
        }
        result = _select_price_from_response(payload)
        self.assertEqual(result["price"], 150000)
        self.assertEqual(result["session"], "CLOSED")

    def test_missing_close_price_returns_none(self):
        """Missing or non-numeric closePrice → price is None."""
        payload = {"closePrice": "", "marketStatus": "CLOSE"}
        result = _select_price_from_response(payload)
        self.assertIsNone(result["price"])

    def test_alternate_stock_end_price_field(self):
        """Some responses carry stockEndPrice instead — the fallback is recognized."""
        payload = {
            "stockEndPrice": "12,345",
            "marketStatus": "OPEN",
        }
        result = _select_price_from_response(payload)
        self.assertEqual(result["price"], 12345)
        self.assertEqual(result["session"], "REGULAR")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
61
stock/app/test_screener_context.py
Normal file
61
stock/app/test_screener_context.py
Normal file
@@ -0,0 +1,61 @@
|
||||
import datetime as dt
|
||||
import sqlite3
|
||||
|
||||
import pandas as pd
|
||||
import pytest
|
||||
|
||||
from app.screener.engine import ScreenContext
|
||||
from app.screener.schema import ensure_screener_schema
|
||||
from app.screener._test_fixtures import make_master, make_prices, make_flow
|
||||
|
||||
|
||||
@pytest.fixture
def conn(tmp_path):
    # Fresh schema-initialized SQLite file per test; closed after the test.
    db_path = tmp_path / "ctx.db"
    c = sqlite3.connect(db_path)
    ensure_screener_schema(c)
    yield c
    c.close()


def _seed(conn, master_df, prices_df, flow_df):
    """Insert fixture master/prices/flow frames into the test database."""
    now = dt.datetime.utcnow().isoformat()
    for t, row in master_df.iterrows():
        conn.execute("""INSERT INTO krx_master (ticker,name,market,market_cap,
                        is_managed,is_preferred,is_spac,listed_date,updated_at)
                        VALUES (?,?,?,?,?,?,?,?,?)""",
                     (t, row["name"], row["market"], row["market_cap"],
                      row["is_managed"], row["is_preferred"], row["is_spac"], None, now))
    prices_df.to_sql("krx_daily_prices", conn, if_exists="append", index=False)
    flow_df.to_sql("krx_flow", conn, if_exists="append", index=False)
    conn.commit()


def test_load_returns_dataframes(conn):
    """ScreenContext.load pulls master/prices/flow for the lookback window."""
    asof = dt.date(2026, 5, 12)
    _seed(conn,
          make_master(["005930", "035420"]),
          make_prices(["005930", "035420"], days=30, asof=asof),
          make_flow(["005930", "035420"], days=30, asof=asof))

    ctx = ScreenContext.load(conn, asof, lookback_days=30)

    assert ctx.asof == asof
    assert set(ctx.master.index) == {"005930", "035420"}
    assert ctx.prices.shape[0] == 60  # 2 tickers × 30 days
    assert ctx.flow.shape[0] == 60


def test_restrict_filters_tickers(conn):
    """restrict() scopes every frame down to the given ticker index."""
    asof = dt.date(2026, 5, 12)
    _seed(conn,
          make_master(["005930", "035420", "091990"]),
          make_prices(["005930", "035420", "091990"], days=30, asof=asof),
          make_flow(["005930", "035420", "091990"], days=30, asof=asof))

    ctx = ScreenContext.load(conn, asof, lookback_days=30)
    scoped = ctx.restrict(pd.Index(["005930"]))

    assert list(scoped.master.index) == ["005930"]
    assert (scoped.prices["ticker"] == "005930").all()
    assert (scoped.flow["ticker"] == "005930").all()
|
||||
55
stock/app/test_screener_engine.py
Normal file
55
stock/app/test_screener_engine.py
Normal file
@@ -0,0 +1,55 @@
|
||||
import datetime as dt
|
||||
import pandas as pd
|
||||
import pytest
|
||||
|
||||
from app.screener.engine import ScreenContext, Screener, combine
|
||||
from app.screener.nodes.hygiene import HygieneGate
|
||||
from app.screener.nodes.foreign_buy import ForeignBuy
|
||||
from app.screener.nodes.momentum import Momentum20
|
||||
from app.screener._test_fixtures import make_master, make_prices, make_flow, make_kospi
|
||||
|
||||
|
||||
def _ctx(master, prices, flow):
    # ScreenContext with a fixed as-of date and a synthetic KOSPI series.
    return ScreenContext(master=master, prices=prices, flow=flow,
                         kospi=make_kospi(days=260),
                         asof=dt.date(2026, 5, 12))


def test_combine_weighted_average():
    """combine() is a weight-normalized average across node score series."""
    scores = {
        "foreign_buy": pd.Series({"A": 80, "B": 20}),
        "momentum": pd.Series({"A": 60, "B": 40}),
    }
    weights = {"foreign_buy": 2.0, "momentum": 1.0}
    out = combine(scores, weights)
    # A: (80*2 + 60*1)/3 = 73.33
    assert abs(out["A"] - 73.333) < 0.1
    assert abs(out["B"] - 26.666) < 0.1


def test_combine_all_zero_weight_raises():
    """combine() rejects a weight map with no active (non-zero) node."""
    scores = {"foreign_buy": pd.Series({"A": 80})}
    with pytest.raises(ValueError, match="no active"):
        combine(scores, {"foreign_buy": 0})


def test_screener_run_end_to_end():
    """Gate + score nodes + ranking wired together: only the large cap survives."""
    asof = dt.date(2026, 5, 12)
    master = make_master(["GOOD", "SMALL"],
                         market_caps={"GOOD": 200_000_000_000, "SMALL": 1_000_000_000})
    prices = make_prices(["GOOD", "SMALL"], days=260, asof=asof, trend_pct=0.1)
    flow = make_flow(["GOOD", "SMALL"], days=260, asof=asof,
                     foreign_per_day={"GOOD": 100_000_000, "SMALL": 0})
    ctx = _ctx(master, prices, flow)

    screener = Screener(
        gate=HygieneGate(),
        score_nodes=[ForeignBuy(), Momentum20()],
        weights={"foreign_buy": 1.0, "momentum": 1.0},
        node_params={"foreign_buy": {"window_days": 5}, "momentum": {"window_days": 20}},
        gate_params={**HygieneGate.default_params, "min_listed_days": 0},
        top_n=10,
    )
    result = screener.run(ctx)
    assert result.survivors_count == 1  # SMALL is filtered out by the gate
    assert result.ranked.index[0] == "GOOD"
|
||||
24
stock/app/test_screener_nodes_base.py
Normal file
24
stock/app/test_screener_nodes_base.py
Normal file
@@ -0,0 +1,24 @@
|
||||
import pandas as pd
|
||||
import pytest
|
||||
|
||||
from app.screener.nodes.base import percentile_rank
|
||||
|
||||
|
||||
def test_percentile_rank_basic():
    """Ranks stay within [0, 100] and preserve input ordering."""
    s = pd.Series([10, 20, 30, 40, 50])
    out = percentile_rank(s)
    assert (out >= 0).all() and (out <= 100).all()
    assert out.iloc[0] < out.iloc[-1]  # smallest gets lowest rank


def test_percentile_rank_all_equal_returns_50():
    """A constant series maps to the neutral midpoint 50."""
    s = pd.Series([42, 42, 42, 42])
    out = percentile_rank(s)
    assert (out == 50.0).all()


def test_percentile_rank_handles_nan():
    """NaN inputs stay NaN; remaining values still rank within [0, 100]."""
    s = pd.Series([1.0, float("nan"), 3.0, 5.0])
    out = percentile_rank(s)
    assert pd.isna(out.iloc[1])
    assert (out.dropna() >= 0).all()
|
||||
32
stock/app/test_screener_nodes_foreign_buy.py
Normal file
32
stock/app/test_screener_nodes_foreign_buy.py
Normal file
@@ -0,0 +1,32 @@
|
||||
import datetime as dt
|
||||
import pandas as pd
|
||||
|
||||
from app.screener.engine import ScreenContext
|
||||
from app.screener.nodes.foreign_buy import ForeignBuy
|
||||
from app.screener._test_fixtures import make_master, make_prices, make_flow
|
||||
|
||||
|
||||
def _ctx(master, prices, flow):
    # Minimal ScreenContext; the KOSPI series is unused by ForeignBuy.
    return ScreenContext(master=master, prices=prices, flow=flow,
                         kospi=pd.Series(dtype=float, name="kospi"),
                         asof=dt.date(2026, 5, 12))


def test_higher_foreign_buy_gets_higher_score():
    """Stronger foreign net buying scores higher; output stays within [0, 100]."""
    asof = dt.date(2026, 5, 12)
    master = make_master(["A", "B"])
    prices = make_prices(["A", "B"], days=30, asof=asof)
    flow = make_flow(["A", "B"], days=30, asof=asof,
                     foreign_per_day={"A": 100_000_000, "B": 0})
    out = ForeignBuy().compute(_ctx(master, prices, flow), {"window_days": 5})
    assert out["A"] > out["B"]
    assert 0 <= out.min() <= out.max() <= 100


def test_all_zero_returns_50():
    """With no foreign flow at all, every ticker gets the neutral score 50."""
    asof = dt.date(2026, 5, 12)
    master = make_master(["A", "B"])
    prices = make_prices(["A", "B"], days=30, asof=asof)
    flow = make_flow(["A", "B"], days=30, asof=asof, foreign_per_day={"A": 0, "B": 0})
    out = ForeignBuy().compute(_ctx(master, prices, flow), {"window_days": 5})
    assert (out == 50.0).all()
|
||||
32
stock/app/test_screener_nodes_high52w.py
Normal file
32
stock/app/test_screener_nodes_high52w.py
Normal file
@@ -0,0 +1,32 @@
|
||||
import datetime as dt
|
||||
import pandas as pd
|
||||
|
||||
from app.screener.engine import ScreenContext
|
||||
from app.screener.nodes.high52w import High52WProximity
|
||||
from app.screener._test_fixtures import make_master, make_prices, make_flow
|
||||
|
||||
|
||||
def _ctx(master, prices, flow):
    # Minimal ScreenContext; the KOSPI series is unused by High52WProximity.
    return ScreenContext(master=master, prices=prices, flow=flow,
                         kospi=pd.Series(dtype=float, name="kospi"),
                         asof=dt.date(2026, 5, 12))


def test_proximity_at_high_returns_100():
    """A steadily rising stock sits at its 52-week high → near-max score."""
    asof = dt.date(2026, 5, 12)
    master = make_master(["A"])
    prices = make_prices(["A"], days=260, asof=asof, trend_pct=0.05)
    flow = make_flow(["A"], days=260, asof=asof)

    out = High52WProximity().compute(_ctx(master, prices, flow), {"window_days": 252})
    assert out["A"] >= 95


def test_proximity_below_70pct_returns_0():
    """A stock far below its 52-week high (heavy downtrend) scores 0."""
    asof = dt.date(2026, 5, 12)
    master = make_master(["A"])
    prices = make_prices(["A"], days=260, asof=asof, start_close=100000, trend_pct=-0.5)
    flow = make_flow(["A"], days=260, asof=asof)

    out = High52WProximity().compute(_ctx(master, prices, flow), {"window_days": 252})
    assert out["A"] == 0
|
||||
46
stock/app/test_screener_nodes_hygiene.py
Normal file
46
stock/app/test_screener_nodes_hygiene.py
Normal file
@@ -0,0 +1,46 @@
|
||||
import datetime as dt
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from app.screener.nodes.hygiene import HygieneGate
|
||||
from app.screener.engine import ScreenContext
|
||||
from app.screener._test_fixtures import make_master, make_prices, make_flow
|
||||
|
||||
|
||||
def _ctx(master, prices, flow):
    # Minimal ScreenContext; the KOSPI series is unused by HygieneGate.
    return ScreenContext(
        master=master, prices=prices, flow=flow,
        kospi=pd.Series(dtype=float, name="kospi"),
        asof=dt.date(2026, 5, 12),
    )


def test_filter_excludes_small_cap():
    """Tickers below the minimum market cap are dropped by the gate."""
    g = HygieneGate()
    ctx = _ctx(
        make_master(["A", "B"], market_caps={"A": 1_000_000_000, "B": 100_000_000_000}),
        make_prices(["A", "B"], days=30),
        make_flow(["A", "B"], days=30),
    )
    out = g.filter(ctx, {**g.default_params, "min_listed_days": 0})
    assert list(out) == ["B"]


def test_filter_excludes_preferred():
    """Preferred shares are excluded from screening."""
    g = HygieneGate()
    ctx = _ctx(
        make_master(["A", "B"], preferred={"B"}),
        make_prices(["A", "B"], days=30),
        make_flow(["A", "B"], days=30),
    )
    out = g.filter(ctx, {**g.default_params, "min_listed_days": 0})
    assert list(out) == ["A"]


def test_filter_excludes_low_value():
    """Tickers with too little traded value are excluded."""
    g = HygieneGate()
    prices = make_prices(["A", "B"], days=30)
    prices.loc[prices["ticker"] == "A", "value"] = 100_000  # tiny turnover
    ctx = _ctx(make_master(["A", "B"]), prices, make_flow(["A", "B"], days=30))
    out = g.filter(ctx, {**g.default_params, "min_listed_days": 0})
    assert list(out) == ["B"]
|
||||
30
stock/app/test_screener_nodes_ma_alignment.py
Normal file
30
stock/app/test_screener_nodes_ma_alignment.py
Normal file
@@ -0,0 +1,30 @@
|
||||
import datetime as dt
|
||||
import pandas as pd
|
||||
|
||||
from app.screener.engine import ScreenContext
|
||||
from app.screener.nodes.ma_alignment import MaAlignment
|
||||
from app.screener._test_fixtures import make_master, make_prices, make_flow
|
||||
|
||||
|
||||
def _ctx(master, prices, flow):
    # Minimal ScreenContext; the KOSPI series is unused by MaAlignment.
    return ScreenContext(master=master, prices=prices, flow=flow,
                         kospi=pd.Series(dtype=float, name="kospi"),
                         asof=dt.date(2026, 5, 12))


def test_strong_uptrend_returns_100():
    """A strong uptrend keeps moving averages perfectly aligned → full score."""
    asof = dt.date(2026, 5, 12)
    master = make_master(["UP"])
    prices = make_prices(["UP"], days=260, asof=asof, start_close=50000, trend_pct=0.2)
    flow = make_flow(["UP"], days=260, asof=asof)
    out = MaAlignment().compute(_ctx(master, prices, flow), MaAlignment.default_params)
    assert out["UP"] == 100.0


def test_downtrend_returns_low():
    """A downtrend breaks the alignment → a low score."""
    asof = dt.date(2026, 5, 12)
    master = make_master(["DN"])
    prices = make_prices(["DN"], days=260, asof=asof, start_close=100000, trend_pct=-0.1)
    flow = make_flow(["DN"], days=260, asof=asof)
    out = MaAlignment().compute(_ctx(master, prices, flow), MaAlignment.default_params)
    assert out["DN"] <= 20.0
|
||||
24
stock/app/test_screener_nodes_momentum.py
Normal file
24
stock/app/test_screener_nodes_momentum.py
Normal file
@@ -0,0 +1,24 @@
|
||||
import datetime as dt
|
||||
import pandas as pd
|
||||
|
||||
from app.screener.engine import ScreenContext
|
||||
from app.screener.nodes.momentum import Momentum20
|
||||
from app.screener._test_fixtures import make_master, make_prices, make_flow
|
||||
|
||||
|
||||
def _ctx(master, prices, flow):
    # Minimal ScreenContext; the KOSPI series is unused by Momentum20.
    return ScreenContext(master=master, prices=prices, flow=flow,
                         kospi=pd.Series(dtype=float, name="kospi"),
                         asof=dt.date(2026, 5, 12))


def test_higher_momentum_gets_higher_score():
    """A rising ticker outranks a falling one on 20-day momentum."""
    asof = dt.date(2026, 5, 12)
    master = make_master(["UP", "DN"])
    up = make_prices(["UP"], days=30, asof=asof, trend_pct=0.5)
    dn = make_prices(["DN"], days=30, asof=asof, trend_pct=-0.3)
    prices = pd.concat([up, dn], ignore_index=True)
    flow = make_flow(["UP", "DN"], days=30, asof=asof)

    out = Momentum20().compute(_ctx(master, prices, flow), {"window_days": 20})
    assert out["UP"] > out["DN"]
|
||||
25
stock/app/test_screener_nodes_rs_rating.py
Normal file
25
stock/app/test_screener_nodes_rs_rating.py
Normal file
@@ -0,0 +1,25 @@
|
||||
import datetime as dt
|
||||
import pandas as pd
|
||||
|
||||
from app.screener.engine import ScreenContext
|
||||
from app.screener.nodes.rs_rating import RsRating
|
||||
from app.screener._test_fixtures import make_master, make_prices, make_flow, make_kospi
|
||||
|
||||
|
||||
def _ctx(master, prices, flow, kospi):
    # ScreenContext with an explicit KOSPI benchmark (RsRating needs it).
    return ScreenContext(master=master, prices=prices, flow=flow,
                         kospi=kospi, asof=dt.date(2026, 5, 12))


def test_outperformer_gets_higher_score():
    """A ticker beating a flat KOSPI outranks one underperforming it."""
    asof = dt.date(2026, 5, 12)
    master = make_master(["UP", "DN"])
    up = make_prices(["UP"], days=260, asof=asof, trend_pct=0.3)
    dn = make_prices(["DN"], days=260, asof=asof, trend_pct=-0.1)
    prices = pd.concat([up, dn], ignore_index=True)
    flow = make_flow(["UP", "DN"], days=260, asof=asof)
    kospi = make_kospi(days=260, asof=asof, trend_pct=0.0)

    out = RsRating().compute(_ctx(master, prices, flow, kospi),
                             RsRating.default_params)
    assert out["UP"] > out["DN"]
|
||||
36
stock/app/test_screener_nodes_vcp_lite.py
Normal file
36
stock/app/test_screener_nodes_vcp_lite.py
Normal file
@@ -0,0 +1,36 @@
|
||||
import datetime as dt
|
||||
import pandas as pd
|
||||
|
||||
from app.screener.engine import ScreenContext
|
||||
from app.screener.nodes.vcp_lite import VcpLite
|
||||
from app.screener._test_fixtures import make_master, make_prices, make_flow
|
||||
|
||||
|
||||
def _ctx(master, prices, flow):
    # Minimal ScreenContext; the KOSPI series is unused by VcpLite.
    return ScreenContext(master=master, prices=prices, flow=flow,
                         kospi=pd.Series(dtype=float, name="kospi"),
                         asof=dt.date(2026, 5, 12))


def test_contracting_stock_scores_higher_than_expanding():
    """A volatility-contracting ticker outranks an expanding one (VCP signal)."""
    asof = dt.date(2026, 5, 12)
    master = make_master(["CON", "EXP"])
    prices = make_prices(["CON", "EXP"], days=260, asof=asof)

    # CON: volatility contracts over the last 40 days (narrow high/low band).
    mask_recent_con = (prices["ticker"] == "CON") & (
        prices["date"] >= (asof - dt.timedelta(days=40)).isoformat()
    )
    prices.loc[mask_recent_con, "high"] = (prices.loc[mask_recent_con, "close"] * 1.003).astype(int)
    prices.loc[mask_recent_con, "low"] = (prices.loc[mask_recent_con, "close"] * 0.997).astype(int)

    # EXP: volatility expands over the last 40 days (wide high/low band).
    mask_recent_exp = (prices["ticker"] == "EXP") & (
        prices["date"] >= (asof - dt.timedelta(days=40)).isoformat()
    )
    prices.loc[mask_recent_exp, "high"] = (prices.loc[mask_recent_exp, "close"] * 1.05).astype(int)
    prices.loc[mask_recent_exp, "low"] = (prices.loc[mask_recent_exp, "close"] * 0.95).astype(int)

    flow = make_flow(["CON", "EXP"], days=260, asof=asof)
    out = VcpLite().compute(_ctx(master, prices, flow), VcpLite.default_params)
    assert out["CON"] > out["EXP"]
|
||||
28
stock/app/test_screener_nodes_volume_surge.py
Normal file
28
stock/app/test_screener_nodes_volume_surge.py
Normal file
@@ -0,0 +1,28 @@
|
||||
import datetime as dt
|
||||
import pandas as pd
|
||||
|
||||
from app.screener.engine import ScreenContext
|
||||
from app.screener.nodes.volume_surge import VolumeSurge
|
||||
from app.screener._test_fixtures import make_master, make_prices, make_flow
|
||||
|
||||
|
||||
def _ctx(master, prices, flow):
    # Minimal ScreenContext; the KOSPI series is unused by VolumeSurge.
    return ScreenContext(master=master, prices=prices, flow=flow,
                         kospi=pd.Series(dtype=float, name="kospi"),
                         asof=dt.date(2026, 5, 12))


def test_recent_volume_surge_gets_higher_score():
    """A ticker whose recent volume spikes above its baseline ranks higher."""
    asof = dt.date(2026, 5, 12)
    master = make_master(["A", "B"])
    prices = make_prices(["A", "B"], days=30, asof=asof)
    # A gets 10x volume over the last 3 days.
    mask = (prices["ticker"] == "A") & (prices["date"] >= (asof - dt.timedelta(days=3)).isoformat())
    prices.loc[mask, "volume"] *= 10
    flow = make_flow(["A", "B"], days=30, asof=asof)

    out = VolumeSurge().compute(
        _ctx(master, prices, flow),
        {"baseline_days": 20, "eval_days": 3},
    )
    assert out["A"] > out["B"]
|
||||
33
stock/app/test_screener_position_sizer.py
Normal file
33
stock/app/test_screener_position_sizer.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import datetime as dt
|
||||
import pandas as pd
|
||||
|
||||
from app.screener.engine import ScreenContext
|
||||
from app.screener.position_sizer import compute_atr_wilder, plan_positions
|
||||
from app.screener._test_fixtures import make_master, make_prices, make_flow
|
||||
|
||||
|
||||
def _ctx(master, prices, flow):
    # Minimal ScreenContext; the KOSPI series is unused by position sizing.
    return ScreenContext(master=master, prices=prices, flow=flow,
                         kospi=pd.Series(dtype=float, name="kospi"),
                         asof=dt.date(2026, 5, 12))


def test_atr_wilder_positive_and_smooth():
    """Wilder ATR over a synthetic OHLC series is strictly positive."""
    df = make_prices(["A"], days=30)
    atr = compute_atr_wilder(df[df["ticker"] == "A"], window=14)
    assert atr > 0


def test_plan_positions_returns_entry_stop_target():
    """plan_positions yields entry/stop/target with stop < entry < target."""
    asof = dt.date(2026, 5, 12)
    master = make_master(["A"])
    prices = make_prices(["A"], days=30, asof=asof, start_close=50000)
    flow = make_flow(["A"], days=30, asof=asof)
    ctx = _ctx(master, prices, flow)
    sizing = plan_positions(ctx, ["A"], {"atr_window": 14, "atr_stop_mult": 2.0, "rr_ratio": 2.0})

    row = sizing["A"]
    assert row["entry_price"] > 0
    assert row["stop_price"] < row["entry_price"]
    assert row["target_price"] > row["entry_price"]
    assert row["atr14"] > 0
||||
155
stock/app/test_screener_router.py
Normal file
155
stock/app/test_screener_router.py
Normal file
@@ -0,0 +1,155 @@
|
||||
import os
|
||||
import sqlite3
|
||||
import pytest
|
||||
from fastapi.testclient import TestClient
|
||||
|
||||
from app.screener.schema import ensure_screener_schema
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def isolated_db(tmp_path, monkeypatch):
|
||||
db_path = tmp_path / "screener_router.db"
|
||||
c = sqlite3.connect(db_path)
|
||||
ensure_screener_schema(c)
|
||||
c.close()
|
||||
monkeypatch.setenv("STOCK_DB_PATH", str(db_path))
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def client():
|
||||
from app.main import app
|
||||
return TestClient(app)
|
||||
|
||||
|
||||
def test_get_nodes_lists_8_score_and_1_gate(client):
|
||||
r = client.get("/api/stock/screener/nodes")
|
||||
assert r.status_code == 200
|
||||
body = r.json()
|
||||
assert len(body["score_nodes"]) == 8
|
||||
assert len(body["gate_nodes"]) == 1
|
||||
assert {n["name"] for n in body["score_nodes"]} == {
|
||||
"foreign_buy", "volume_surge", "momentum",
|
||||
"high52w", "rs_rating", "ma_alignment", "vcp_lite",
|
||||
"ai_news",
|
||||
}
|
||||
|
||||
|
||||
def test_settings_get_returns_defaults(client):
|
||||
r = client.get("/api/stock/screener/settings")
|
||||
assert r.status_code == 200
|
||||
body = r.json()
|
||||
assert body["weights"]["foreign_buy"] == 1.0
|
||||
assert body["top_n"] == 20
|
||||
|
||||
|
||||
def test_settings_put_then_get_round_trip(client):
|
||||
new_settings = {
|
||||
"weights": {"foreign_buy": 2.5, "momentum": 1.0, "volume_surge": 1.0,
|
||||
"high52w": 1.2, "rs_rating": 1.2, "ma_alignment": 1.0, "vcp_lite": 0.8},
|
||||
"node_params": {"foreign_buy": {"window_days": 7}},
|
||||
"gate_params": {"min_market_cap_won": 100_000_000_000,
|
||||
"min_avg_value_won": 500_000_000,
|
||||
"min_listed_days": 60,
|
||||
"skip_managed": True, "skip_preferred": True, "skip_spac": True,
|
||||
"skip_halted_days": 3},
|
||||
"top_n": 30,
|
||||
"rr_ratio": 2.5,
|
||||
"atr_window": 14,
|
||||
"atr_stop_mult": 2.0,
|
||||
}
|
||||
r = client.put("/api/stock/screener/settings", json=new_settings)
|
||||
assert r.status_code == 200
|
||||
r2 = client.get("/api/stock/screener/settings")
|
||||
body = r2.json()
|
||||
assert body["weights"]["foreign_buy"] == 2.5
|
||||
assert body["top_n"] == 30
|
||||
|
||||
|
||||
# ---- /run tests ----
|
||||
|
||||
from app.screener._test_fixtures import make_master, make_prices, make_flow
|
||||
|
||||
|
||||
def _seed_min(conn, asof_iso="2026-05-12"):
|
||||
import datetime as dt
|
||||
now = dt.datetime.utcnow().isoformat()
|
||||
rows = [
|
||||
("BIG1", "큰주식1", "KOSPI", 200_000_000_000, 0, 0, 0, None, now),
|
||||
("BIG2", "큰주식2", "KOSPI", 100_000_000_000, 0, 0, 0, None, now),
|
||||
("SMALL", "작은주식", "KOSPI", 1_000_000_000, 0, 0, 0, None, now),
|
||||
]
|
||||
for r in rows:
|
||||
conn.execute("""INSERT INTO krx_master (ticker,name,market,market_cap,
|
||||
is_managed,is_preferred,is_spac,listed_date,updated_at)
|
||||
VALUES (?,?,?,?,?,?,?,?,?)""", r)
|
||||
asof = dt.date(2026, 5, 12)
|
||||
p = make_prices(["BIG1", "BIG2", "SMALL"], days=260, asof=asof)
|
||||
f = make_flow(["BIG1", "BIG2", "SMALL"], days=260, asof=asof,
|
||||
foreign_per_day={"BIG1": 100_000_000, "BIG2": 50_000_000, "SMALL": 0})
|
||||
p.to_sql("krx_daily_prices", conn, if_exists="append", index=False)
|
||||
f.to_sql("krx_flow", conn, if_exists="append", index=False)
|
||||
conn.commit()
|
||||
|
||||
|
||||
def test_run_preview_no_save(client):
|
||||
db_path = os.environ["STOCK_DB_PATH"]
|
||||
c = sqlite3.connect(db_path)
|
||||
_seed_min(c)
|
||||
c.close()
|
||||
|
||||
r = client.post("/api/stock/screener/run", json={"mode": "preview", "asof": "2026-05-12"})
|
||||
assert r.status_code == 200
|
||||
body = r.json()
|
||||
assert body["status"] == "success"
|
||||
assert body["run_id"] is None
|
||||
assert body["telegram_payload"] is not None
|
||||
|
||||
c = sqlite3.connect(db_path)
|
||||
cnt = c.execute("SELECT count(*) FROM screener_runs").fetchone()[0]
|
||||
assert cnt == 0
|
||||
|
||||
|
||||
def test_run_manual_save_writes_row(client):
    """mode=manual_save must persist exactly one screener_runs row."""
    db_path = os.environ["STOCK_DB_PATH"]
    c = sqlite3.connect(db_path)
    try:
        _seed_min(c)
    finally:
        c.close()

    r = client.post("/api/stock/screener/run",
                    json={"mode": "manual_save", "asof": "2026-05-12"})
    assert r.status_code == 200
    assert r.json()["run_id"] is not None

    # Fix: the verification connection was previously leaked (never closed).
    c = sqlite3.connect(db_path)
    try:
        cnt = c.execute("SELECT count(*) FROM screener_runs").fetchone()[0]
    finally:
        c.close()
    assert cnt == 1
||||
def test_runs_list_and_detail(client):
    """A saved run must appear in the list endpoint and be fetchable by id."""
    db_path = os.environ["STOCK_DB_PATH"]
    seed_conn = sqlite3.connect(db_path)
    _seed_min(seed_conn)
    seed_conn.close()

    saved = client.post(
        "/api/stock/screener/run",
        json={"mode": "manual_save", "asof": "2026-05-12"},
    ).json()
    run_id = saved["run_id"]

    listing = client.get("/api/stock/screener/runs?limit=5")
    assert listing.status_code == 200
    assert run_id in {row["id"] for row in listing.json()}

    detail = client.get(f"/api/stock/screener/runs/{run_id}")
    assert detail.status_code == 200
    detail_body = detail.json()
    assert detail_body["meta"]["id"] == run_id
    assert isinstance(detail_body["results"], list)
|
||||
def test_run_holiday_returns_skipped(client):
    """2026-05-09 is a Saturday; _is_holiday treats weekday >= 5 as closed."""
    resp = client.post(
        "/api/stock/screener/run",
        json={"mode": "auto", "asof": "2026-05-09"},
    )
    assert resp.status_code == 200
    assert resp.json()["status"] == "skipped_holiday"
||||
37
stock/app/test_screener_schema.py
Normal file
37
stock/app/test_screener_schema.py
Normal file
@@ -0,0 +1,37 @@
|
||||
import sqlite3
|
||||
from app.screener.schema import ensure_screener_schema
|
||||
|
||||
|
||||
def test_creates_all_tables(tmp_path):
    """ensure_screener_schema() must create every screener table."""
    conn = sqlite3.connect(tmp_path / "test.db")
    try:
        ensure_screener_schema(conn)
        tables = {r[0] for r in conn.execute(
            "SELECT name FROM sqlite_master WHERE type='table'"
        ).fetchall()}
    finally:
        # Fix: connection was previously left open on the tmp_path db.
        conn.close()

    expected = {
        "krx_master", "krx_daily_prices", "krx_flow",
        "screener_settings", "screener_runs", "screener_results",
    }
    assert expected.issubset(tables)
||||
def test_settings_seeded_with_singleton_row(tmp_path):
    """Schema creation must seed screener_settings with exactly one row (id=1)."""
    conn = sqlite3.connect(tmp_path / "test.db")
    try:
        ensure_screener_schema(conn)
        rows = conn.execute("SELECT id FROM screener_settings").fetchall()
    finally:
        # Fix: connection was previously left open on the tmp_path db.
        conn.close()
    assert rows == [(1,)]
||||
def test_idempotent(tmp_path):
    """Calling ensure_screener_schema() twice must not error or re-seed."""
    conn = sqlite3.connect(tmp_path / "test.db")
    try:
        ensure_screener_schema(conn)
        ensure_screener_schema(conn)  # second call must be a no-op
        rows = conn.execute("SELECT count(*) FROM screener_settings").fetchall()
    finally:
        # Fix: connection was previously left open on the tmp_path db.
        conn.close()
    assert rows == [(1,)]
||||
129
stock/app/test_screener_snapshot.py
Normal file
129
stock/app/test_screener_snapshot.py
Normal file
@@ -0,0 +1,129 @@
|
||||
import datetime as dt
|
||||
import sqlite3
|
||||
|
||||
import pandas as pd
|
||||
import pytest
|
||||
|
||||
from app.screener import snapshot as snap
|
||||
from app.screener.schema import ensure_screener_schema
|
||||
|
||||
|
||||
@pytest.fixture
def conn(tmp_path):
    """Yield a throwaway SQLite connection with the screener schema applied."""
    connection = sqlite3.connect(tmp_path / "snap.db")
    ensure_screener_schema(connection)
    yield connection
    connection.close()
|
||||
def _stub_listing(monkeypatch):
    """Patch snap.fetch_master_listing to return a fixed three-ticker frame."""
    records = [
        {"Code": "005930", "Name": "삼성전자", "Market": "KOSPI",
         "Marcap": 420_000_000_000_000,
         "Open": 70000, "High": 72000, "Low": 69500, "Close": 71000,
         "Volume": 12_000_000, "Amount": 840_000_000_000},
        {"Code": "035420", "Name": "NAVER", "Market": "KOSPI",
         "Marcap": 30_000_000_000_000,
         "Open": 215000, "High": 220000, "Low": 213000, "Close": 218000,
         "Volume": 1_000_000, "Amount": 218_000_000_000},
        {"Code": "091990", "Name": "셀트리온헬스케어우", "Market": "KOSDAQ",
         "Marcap": 10_000_000_000_000,
         "Open": 60000, "High": 61000, "Low": 59500, "Close": 60500,
         "Volume": 500_000, "Amount": 30_250_000_000},
    ]
    # Build the frame once so every call returns the same object, as before.
    frame = pd.DataFrame(records)
    monkeypatch.setattr(snap, "fetch_master_listing", lambda: frame)
||||
def _stub_flow(monkeypatch, mapping):
    """Patch snap.fetch_flow_naver.

    `mapping` maps ticker -> {"foreign_net", "institution_net"}; None (either
    the whole mapping or a missing ticker) means "no flow data".
    """
    def fake_flow(ticker, *, client):
        entry = None if mapping is None else mapping.get(ticker)
        if entry is None:
            return None
        return {
            "date": dt.date(2026, 5, 12).isoformat(),
            "foreign_net": entry["foreign_net"],
            "institution_net": entry["institution_net"],
        }

    monkeypatch.setattr(snap, "fetch_flow_naver", fake_flow)
|
||||
def test_refresh_daily_writes_master_and_prices(conn, monkeypatch):
    """With no flow data available, refresh_daily writes master + prices only."""
    _stub_listing(monkeypatch)
    _stub_flow(monkeypatch, None)

    summary = snap.refresh_daily(conn, dt.date(2026, 5, 12),
                                 flow_top_n=10, rate_limit_sec=0)

    assert summary["master_count"] == 3
    assert summary["prices_count"] == 3
    assert summary["flow_count"] == 0

    (close,) = conn.execute(
        "SELECT close FROM krx_daily_prices WHERE ticker='005930' AND date='2026-05-12'"
    ).fetchone()
    assert close == 71000
||||
def test_refresh_daily_writes_flow_for_top_n(conn, monkeypatch):
    """Flow rows are written for tickers covered by the stubbed flow data."""
    _stub_listing(monkeypatch)
    _stub_flow(monkeypatch, {
        "005930": {"foreign_net": 12_000_000_000, "institution_net": 4_000_000_000},
        "035420": {"foreign_net": -3_000_000_000, "institution_net": 8_000_000_000},
    })

    summary = snap.refresh_daily(conn, dt.date(2026, 5, 12),
                                 flow_top_n=2, rate_limit_sec=0)
    assert summary["flow_count"] == 2

    (foreign_net,) = conn.execute(
        "SELECT foreign_net FROM krx_flow WHERE ticker='005930'"
    ).fetchone()
    assert foreign_net == 12_000_000_000
||||
def test_master_flags_preferred(conn, monkeypatch):
    """Ticker 091990 from the stub listing must end up with is_preferred=1."""
    _stub_listing(monkeypatch)
    _stub_flow(monkeypatch, None)

    snap.refresh_daily(conn, dt.date(2026, 5, 12), flow_top_n=0, rate_limit_sec=0)

    (is_preferred,) = conn.execute(
        "SELECT is_preferred FROM krx_master WHERE ticker='091990'"
    ).fetchone()
    assert is_preferred == 1
||||
def test_refresh_daily_is_idempotent(conn, monkeypatch):
    """Running the same day's snapshot twice must not duplicate price rows."""
    _stub_listing(monkeypatch)
    _stub_flow(monkeypatch, None)

    for _ in range(2):
        snap.refresh_daily(conn, dt.date(2026, 5, 12), flow_top_n=0, rate_limit_sec=0)

    (count,) = conn.execute(
        "SELECT count(*) FROM krx_daily_prices WHERE date='2026-05-12'"
    ).fetchone()
    assert count == 3
||||
def test_fetch_flow_naver_parses_html():
    """Real HTML structure parse with synthetic naver-like markup."""
    html = """
    <html><body>
    <table class="type2">
      <tr><th>날짜</th></tr>
      <tr><td>2026.05.12</td><td>71,000</td><td>500</td><td>0.71%</td>
          <td>12,000,000</td><td>4,000,000,000</td><td>12,000,000,000</td>
          <td>1</td><td>53.0</td></tr>
      <tr><td>2026.05.09</td><td>70,500</td><td>-200</td><td>-0.28%</td>
          <td>10,000,000</td><td>2,000,000,000</td><td>5,000,000,000</td>
          <td>1</td><td>52.8</td></tr>
    </table>
    </body></html>
    """

    class StubResponse:
        status_code = 200
        text = html

    class StubClient:
        def get(self, url, params):
            return StubResponse()

    parsed = snap.fetch_flow_naver("005930", client=StubClient())
    assert parsed == {
        "date": "2026-05-12",
        "foreign_net": 12_000_000_000,
        "institution_net": 4_000_000_000,
    }
||||
68
stock/app/test_screener_telegram.py
Normal file
68
stock/app/test_screener_telegram.py
Normal file
@@ -0,0 +1,68 @@
|
||||
import datetime as dt
|
||||
from app.screener.telegram import build_telegram_payload
|
||||
|
||||
|
||||
def test_build_payload_includes_top10_and_link():
    """Message body lists ranks 1-10 only; the detail link carries the run id."""
    def _row(rank):
        return {
            "rank": rank, "ticker": f"00{rank:04}", "name": f"종목{rank}",
            "total_score": 90 - rank,
            "scores": {"foreign_buy": 80 + rank, "volume_surge": 60, "momentum": 70,
                       "high52w": 75, "rs_rating": 85, "ma_alignment": 80, "vcp_lite": 30},
            "close": 50000, "entry_price": 50250, "stop_price": 48500,
            "target_price": 53750, "r_pct": 3.5,
        }

    payload = build_telegram_payload(
        asof=dt.date(2026, 5, 12),
        mode="auto",
        survivors_count=612,
        top_n=20,
        rows=[_row(i) for i in range(1, 21)],
        run_id=42,
    )
    assert payload["parse_mode"] == "MarkdownV2"
    text = payload["text"]
    assert "2026" in text and "05" in text and "12" in text
    assert "종목1" in text
    assert "종목10" in text
    assert "종목11" not in text  # only ranks 1-10 appear in the body
    assert "42" in text  # run_id link
||||
def test_score_threshold_filters_node_labels():
    """Only score nodes >= 70 are rendered with a full label in the text."""
    row = {
        "rank": 1, "ticker": "A", "name": "A주",
        "total_score": 80,
        "scores": {"foreign_buy": 90, "volume_surge": 50, "momentum": 70,
                   "high52w": 30, "rs_rating": 80, "ma_alignment": 80, "vcp_lite": 60},
        "close": 50000, "entry_price": 50250, "stop_price": 48500,
        "target_price": 53750, "r_pct": 3.5,
    }
    payload = build_telegram_payload(dt.date(2026, 5, 12), "auto", 100, 1, [row], run_id=1)
    text = payload["text"]

    # shown: foreign_buy=90, momentum=70, rs_rating=80, ma_alignment=80 (>= 70)
    for label in ("외국인 90", "20일모멘텀 70", "RS레이팅 80", "이평선정배열 80"):
        assert label in text
    # hidden: volume_surge=50, high52w=30, vcp_lite=60 (< 70)
    for label in ("거래량급증", "52주신고가", "VCP수축"):
        assert label not in text
||||
def test_prices_have_won_suffix():
    """Entry/stop/target prices render with thousands separators and 원."""
    row = {
        "rank": 1, "ticker": "A", "name": "A주",
        "total_score": 80,
        "scores": {"foreign_buy": 80},
        "close": 50000, "entry_price": 50250, "stop_price": 48500,
        "target_price": 53750, "r_pct": 3.5,
    }
    payload = build_telegram_payload(dt.date(2026, 5, 12), "auto", 100, 1, [row], run_id=1)
    text = payload["text"]
    for expected in ("50,250원", "48,500원", "53,750원"):
        assert expected in text
||||
Reference in New Issue
Block a user