lotto-lab: strategy_evolver — EMA/Softmax weight evolution + smart recommendations

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-06 21:15:42 +09:00
parent 706ca410ca
commit c9f10aca4a
2 changed files with 349 additions and 0 deletions


@@ -0,0 +1,277 @@
"""
Strategy evolution engine — adaptive weight management based on EMA + Softmax.
"""
import math
import json
import logging
from typing import Dict, List, Any, Optional
logger = logging.getLogger("lotto-backend")
# ── Constants (importable without DB) ─────────────────────────────────────────
ALPHA = 0.3           # EMA smoothing factor (weight given to the newest draw score)
TEMPERATURE = 2.0     # Softmax temperature
MIN_WEIGHT = 0.05     # Minimum weight floor per strategy
INITIAL_EMA = 0.15    # Cold-start initial EMA value
MIN_DATA_DRAWS = 10   # Minimum number of draws before learning is considered active
STRATEGIES = ["combined", "simulation", "heatmap", "manual", "custom"]
RANK_BONUS = {5: 0.1, 4: 0.3, 3: 0.6, 2: 0.8, 1: 1.0}
# ── Pure functions (no DB dependency) ─────────────────────────────────────────
def calc_draw_score(results: List[Dict]) -> float:
"""구매 결과 리스트 → 평균 성과 점수"""
if not results:
return 0.0
scores = []
for r in results:
s = r.get("correct", 0) / 6.0
s += RANK_BONUS.get(r.get("rank", 0), 0)
scores.append(s)
return sum(scores) / len(scores)
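# Worked example (illustrative values, not part of the engine): a purchase with
# 3 matched numbers at rank 5 contributes 3/6 + RANK_BONUS[5] = 0.5 + 0.1 = 0.6,
# so a draw whose only result is that purchase gives calc_draw_score(...) == 0.6.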
def _softmax_weights(ema_scores: Dict[str, float]) -> Dict[str, float]:
"""EMA 점수 → Softmax → 최소 가중치 보장 → 정규화"""
raw = {s: math.exp(ema / TEMPERATURE) for s, ema in ema_scores.items()}
total = sum(raw.values())
weights = {s: v / total for s, v in raw.items()}
    clamped = {}
    surplus = 0.0
    unclamped = []
    # Raise anything below the floor to MIN_WEIGHT and track how much extra
    # mass ("surplus") that adds.
    for s, w in weights.items():
        if w < MIN_WEIGHT:
            clamped[s] = MIN_WEIGHT
            surplus += MIN_WEIGHT - w
        else:
            unclamped.append(s)
            clamped[s] = w
    # Reclaim the surplus from the unclamped strategies in proportion to their
    # weight, then renormalize below so the final weights sum to 1.
    if surplus > 0 and unclamped:
        unclamped_total = sum(clamped[s] for s in unclamped)
        for s in unclamped:
            clamped[s] -= surplus * (clamped[s] / unclamped_total)
final_total = sum(clamped.values())
return {s: round(v / final_total, 4) for s, v in clamped.items()}
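# Numerical sketch (assumed EMA values, for illustration only): with
# TEMPERATURE = 2.0, ema_scores {"combined": 0.30, "manual": 0.10} give raw
# factors exp(0.15) ≈ 1.162 and exp(0.05) ≈ 1.051, i.e. weights of roughly
# 0.525 and 0.475; both sit above MIN_WEIGHT, so the clamping pass is a no-op.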
# ── DB-dependent functions (use lazy imports) ─────────────────────────────────
def _db():
"""Lazy import to avoid circular/relative import issues in tests"""
from . import db as _db_mod
return _db_mod
def _recommender():
from . import recommender as _rec_mod
return _rec_mod
def _analyzer():
from . import analyzer as _ana_mod
return _ana_mod
def update_ema_for_strategy(strategy: str, draw_score: float) -> float:
db = _db()
weights = db.get_strategy_weights()
current = next((w for w in weights if w["strategy"] == strategy), None)
old_ema = current["ema_score"] if current else INITIAL_EMA
new_ema = ALPHA * draw_score + (1 - ALPHA) * old_ema
return new_ema
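# Example of the update rule above (illustrative numbers): with ALPHA = 0.3, a
# strategy still at the cold-start EMA of 0.15 that scores 0.6 on a draw moves
# to 0.3 * 0.6 + 0.7 * 0.15 = 0.285.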
def recalculate_weights() -> Dict[str, float]:
db = _db()
weights_rows = db.get_strategy_weights()
ema_scores = {w["strategy"]: w["ema_score"] for w in weights_rows}
for s in STRATEGIES:
if s not in ema_scores:
ema_scores[s] = INITIAL_EMA
new_weights = _softmax_weights(ema_scores)
for s, w in new_weights.items():
row = next((r for r in weights_rows if r["strategy"] == s), None)
db.update_strategy_weight(
strategy=s,
weight=w,
ema_score=ema_scores[s],
total_sets=row["total_sets"] if row else 0,
total_hits_3plus=row["total_hits_3plus"] if row else 0,
)
logger.info(f"[strategy_evolver] 가중치 재계산: {new_weights}")
return new_weights
def evolve_after_check(strategy: str, draw_no: int, results: List[Dict]) -> None:
db = _db()
draw_score = calc_draw_score(results)
new_ema = update_ema_for_strategy(strategy, draw_score)
weights_rows = db.get_strategy_weights()
current = next((w for w in weights_rows if w["strategy"] == strategy), None)
hits_3plus = sum(1 for r in results if r.get("correct", 0) >= 3)
db.update_strategy_weight(
strategy=strategy,
weight=current["weight"] if current else 0.2,
ema_score=new_ema,
total_sets=(current["total_sets"] if current else 0) + len(results),
total_hits_3plus=(current["total_hits_3plus"] if current else 0) + hits_3plus,
)
recalculate_weights()
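# Typical call site (a sketch; the draw number and results below are made up):
#   evolve_after_check("combined", draw_no=1150,
#                      results=[{"correct": 3, "rank": 5}, {"correct": 1, "rank": 0}])
# scores the draw, updates the "combined" EMA and cumulative counters, then
# rebalances all strategy weights via recalculate_weights().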
def get_weights_with_trend() -> Dict[str, Any]:
db = _db()
weights = db.get_strategy_weights()
perfs = db.get_strategy_performance()
strat_perfs = {}
for p in perfs:
s = p["strategy"]
if s not in strat_perfs:
strat_perfs[s] = []
strat_perfs[s].append(p)
result = []
for w in weights:
sp = strat_perfs.get(w["strategy"], [])
        if len(sp) >= 6:
            recent_avg = sum(p["avg_score"] for p in sp[-3:]) / 3
            # Compare against the preceding, non-overlapping 3-draw window.
            older_avg = sum(p["avg_score"] for p in sp[-6:-3]) / 3
            delta = recent_avg - older_avg
            trend = "up" if delta > 0.02 else ("down" if delta < -0.02 else "stable")
else:
trend = "stable"
result.append({
"strategy": w["strategy"],
"weight": w["weight"],
"ema_score": w["ema_score"],
"total_sets": w["total_sets"],
"hits_3plus": w["total_hits_3plus"],
"trend": trend,
})
all_draws = set()
for p in perfs:
all_draws.add(p["draw_no"])
return {
"weights": result,
"last_evolved": weights[0]["updated_at"] if weights else None,
"min_data_draws": MIN_DATA_DRAWS,
"current_data_draws": len(all_draws),
"status": "active" if len(all_draws) >= MIN_DATA_DRAWS else "learning",
}
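# Trend example (hypothetical avg_score history): if a strategy's last three
# per-draw averages mean 0.31 and the preceding window means 0.27, the delta of
# 0.04 exceeds the 0.02 threshold and the trend is reported as "up".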
def generate_smart_recommendation(sets: int = 5) -> Dict[str, Any]:
db = _db()
rec = _recommender()
ana = _analyzer()
weights_data = db.get_strategy_weights()
weight_map = {w["strategy"]: w["weight"] for w in weights_data}
draws = db.get_all_draw_numbers()
if not draws:
return {"error": "No draw data"}
latest = db.get_latest_draw()
cache = ana.build_analysis_cache(draws)
past_recs = db.list_recommendations_ex(limit=100, sort="id_desc")
candidates = []
seen_keys = set()
    def _add_candidate(nums: List[int], strategy: str, raw_score: Optional[float] = None):
key = tuple(sorted(nums))
if key in seen_keys:
return
seen_keys.add(key)
if raw_score is None:
sc = ana.score_combination(nums, cache)
raw_score = sc["score_total"]
meta = raw_score * weight_map.get(strategy, 0.1)
candidates.append({
"numbers": sorted(nums),
"raw_score": round(raw_score, 4),
"strategy": strategy,
"meta_score": round(meta, 4),
})
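    # Meta-score sketch (made-up numbers): a candidate with raw_score 0.72 from a
    # strategy currently weighted 0.35 gets meta_score 0.72 * 0.35 = 0.252; a
    # strategy missing from weight_map falls back to the 0.1 default above.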
    # combined: 10 sets
for _ in range(10):
try:
r = ana.generate_combined_recommendation(draws)
if "final_numbers" in r:
_add_candidate(r["final_numbers"], "combined")
except Exception:
pass
    # simulation: top 10 best_picks
best = db.get_best_picks(limit=10)
for b in best:
nums = json.loads(b["numbers"]) if isinstance(b["numbers"], str) else b["numbers"]
_add_candidate(nums, "simulation", b.get("score_total"))
    # heatmap: 10 sets
for _ in range(10):
try:
r = rec.recommend_with_heatmap(draws, past_recs)
_add_candidate(r["numbers"], "heatmap")
except Exception:
pass
    # manual: 10 sets
for _ in range(10):
try:
r = rec.recommend_numbers(draws)
_add_candidate(r["numbers"], "manual")
except Exception:
pass
candidates.sort(key=lambda c: -c["meta_score"])
top = candidates[:sets]
result_sets = []
for c in top:
sc = ana.score_combination(c["numbers"], cache)
contributions = {}
for strat in STRATEGIES:
contributions[strat] = round(weight_map.get(strat, 0) * sc["score_total"], 4)
contrib_total = sum(contributions.values()) or 1
contributions = {s: round(v / contrib_total, 3) for s, v in contributions.items()}
result_sets.append({
"numbers": c["numbers"],
"meta_score": c["meta_score"],
"source_strategy": c["strategy"],
"contribution": contributions,
"individual_scores": {k: round(v, 4) for k, v in sc.items()},
})
perfs = db.get_strategy_performance()
data_draws = len(set(p["draw_no"] for p in perfs))
status = "active" if data_draws >= MIN_DATA_DRAWS else "learning"
return {
"sets": result_sets,
"strategy_weights_used": weight_map,
"learning_status": {
"draws_learned": data_draws,
"status": status,
"message": "" if status == "active" else f"{MIN_DATA_DRAWS}회차 이상 데이터 필요 (현재 {data_draws}회차)",
},
"based_on_latest_draw": latest["drw_no"] if latest else None,
}
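
# Minimal smoke test for the pure helpers above (a sketch: it only exercises
# the DB-free functions; the DB-backed entry points are deliberately left out).
if __name__ == "__main__":
    demo_results = [{"correct": 3, "rank": 5}, {"correct": 1, "rank": 0}]
    print("draw score:", calc_draw_score(demo_results))
    print("uniform weights:", _softmax_weights({s: INITIAL_EMA for s in STRATEGIES}))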