import multiprocessing

import uvicorn
from contextlib import asynccontextmanager
from fastapi import FastAPI, Request
from pydantic import BaseModel

from modules.config import Config
from modules.services.ollama import OllamaManager
from modules.services.kis import KISClient
from modules.services.news import NewsCollector
from modules.services.telegram import TelegramMessenger
from modules.bot import AutoTradingBot
from modules.services.telegram_bot.runner import run_telegram_bot_standalone

# Global objects (worker processes and service clients are initialized at startup)
bot_process = None
telegram_process = None
messenger = TelegramMessenger()
ai_agent = None
kis_client = None
news_collector = None


# Wrapper so the trading bot can run in its own process
def run_trading_bot():
    bot = AutoTradingBot()
    bot.loop()


@asynccontextmanager
async def lifespan(app: FastAPI):
    # [Startup]
    global bot_process, telegram_process, messenger, ai_agent, kis_client, news_collector

    # 1. Validate configuration
    Config.validate()

    # 2. Initialize global objects (used by the server itself)
    ai_agent = OllamaManager()
    kis_client = KISClient()
    news_collector = NewsCollector()

    print("🤖 Starting AI Trading Bot & Telegram Bot (Multimedia Mode)...")

    # 3. Launch worker processes
    # (1) Trading bot
    bot_process = multiprocessing.Process(target=run_trading_bot)
    bot_process.start()

    # (2) Telegram bot (polling)
    telegram_process = multiprocessing.Process(target=run_telegram_bot_standalone)
    telegram_process.start()

    messenger.send_message("🖥️ **[Server Started]** Windows AI Server (Refactored) Online.")

    yield

    # [Shutdown]
    print("🛑 Shutting down processes...")
    if telegram_process and telegram_process.is_alive():
        print(" - Stopping Telegram Bot...")
        telegram_process.terminate()
        telegram_process.join()
    if bot_process and bot_process.is_alive():
        print(" - Stopping Trading Bot...")
        bot_process.terminate()
        bot_process.join()
    messenger.send_message("🛑 **[Server Stopped]** Server Shutting Down.")


app = FastAPI(title="Windows AI Stock Server", lifespan=lifespan)


@app.middleware("http")
async def log_requests(request: Request, call_next):
    # Log every incoming request before handing it to the route handler
    print(f"📥 {request.method} {request.url}")
    response = await call_next(request)
    return response


# Request models
class ManualOrderRequest(BaseModel):
    ticker: str
    action: str  # BUY or SELL
    quantity: int


@app.get("/")
def index():
    # Health check: report service status and current GPU VRAM usage
    vram = 0
    if ai_agent:
        vram = ai_agent.check_vram()
    return {
        "status": "online",
        "gpu_vram": round(vram, 2),
        "service": "Windows AI Server (Refactored)",
    }


@app.get("/trade/balance")
@app.get("/api/trade/balance")
async def get_balance():
    if not kis_client:
        return {"error": "Server not initialized"}
    return kis_client.get_balance()


@app.post("/trade/order")
@app.post("/api/trade/order")
async def manual_order(req: ManualOrderRequest):
    ticker = req.ticker
    qty = req.quantity
    action = req.action.upper()

    result = "No Action"
    if action == "BUY":
        result = kis_client.buy_stock(ticker, qty)
    elif action == "SELL":
        result = kis_client.sell_stock(ticker, qty)
    return {"status": "executed", "kis_result": result}


@app.post("/analyze/portfolio")
@app.post("/api/analyze/portfolio")
async def analyze_portfolio():
    # Simplified analysis: feed the current balance and recent news to the LLM
    balance = kis_client.get_balance()
    news = news_collector.get_market_news()
    prompt = f"""
    Analyze this portfolio with recent news:
    Portfolio: {balance}
    News: {news}
    Response in Korean.
    """
    analysis = ai_agent.request_inference(prompt)
    return {"analysis": analysis}


if __name__ == "__main__":
    # reload=True restarts the app (and the worker processes started in lifespan)
    # whenever source files change
    uvicorn.run("main_server:app", host="0.0.0.0", port=8000, reload=True)
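# ---------------------------------------------------------------------------
# Example client usage (a sketch, not executed as part of this module).
# Assumes the server is running locally on port 8000 and that the `requests`
# package is available; the ticker "005930" and quantity are placeholders,
# not values used anywhere above. Endpoint paths match the routes defined in
# this file.
#
#   import requests
#
#   # Check server status and reported GPU VRAM
#   print(requests.get("http://localhost:8000/").json())
#
#   # Query the current KIS account balance
#   print(requests.get("http://localhost:8000/api/trade/balance").json())
#
#   # Place a manual BUY order (placeholder ticker and quantity)
#   order = {"ticker": "005930", "action": "BUY", "quantity": 1}
#   print(requests.post("http://localhost:8000/api/trade/order", json=order).json())
# ---------------------------------------------------------------------------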