commit 105b8b3db4935d1ad377faa2365ef2dba2829e36 Author: adminko <3477744av@gmail.com> Date: Tue Mar 10 16:58:02 2026 +0000 Initial MVP skeleton with auth, chat persistence, UI and text LLM integration diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..e017fd4 --- /dev/null +++ b/.env.example @@ -0,0 +1,38 @@ +APP_ENV=dev + +POSTGRES_DB=ai_chat +POSTGRES_USER=ai_chat +POSTGRES_PASSWORD=change_me_now + +ADMIN_BOOTSTRAP_LOGIN=admin +ADMIN_BOOTSTRAP_PASSWORD=change_me_later + +NEXT_PUBLIC_API_BASE_URL=http://127.0.0.1:18000 + +SESSION_SECRET=change_me_long_random_secret +SESSION_COOKIE_NAME=ai_chat_session +SESSION_COOKIE_SECURE=false +SESSION_COOKIE_SAMESITE=lax +SESSION_TTL_HOURS=168 + +LLM_MANAGER_BASE_URL=http://192.168.149.194:8001 +LLM_MANAGER_API_KEY=change_me +SEARXNG_BASE_URL=http://192.168.149.22:8888 + +UPLOAD_ROOT=/data/uploads +TEMP_ROOT=/data/temp +LOG_ROOT=/data/logs + +MAX_IMAGE_MB=10 +MAX_AUDIO_MB=25 +MAX_AUDIO_DURATION_SEC=300 +MAX_MESSAGE_CHARS=16000 + +TTS_TTL_HOURS=4 +TEMP_AUDIO_TTL_HOURS=24 +ORPHAN_FILE_GRACE_HOURS=24 + +SUMMARY_TRIGGER_MESSAGE_COUNT=30 +SUMMARY_KEEP_RECENT_MESSAGES=16 +SUMMARY_MAX_CHARS=8000 +SUMMARY_MODEL_ALIAS=qwen3.5-4b diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..4e3bfa5 --- /dev/null +++ b/.gitignore @@ -0,0 +1,14 @@ +.env +__pycache__/ +*.pyc +*.pyo +*.pyd +.venv/ +venv/ +node_modules/ +.next/ +dist/ +build/ +*.log +/tmp/ +walkthrough.md diff --git a/README.md b/README.md new file mode 100644 index 0000000..84ef44b --- /dev/null +++ b/README.md @@ -0,0 +1,27 @@ +# AI Chat MVP + +Первый шаг: infra skeleton. 
+ +## Быстрый старт + +```bash +cp .env.example .env +docker compose up -d --build + +Что должно подняться + +frontend: http://127.0.0.1:13000 + +backend: http://127.0.0.1:18000 + +backend docs: http://127.0.0.1:18000/docs + +Что пока реализовано + +docker-compose каркас + +postgres container + +backend health/ready + +frontend shell diff --git a/backend/Dockerfile b/backend/Dockerfile new file mode 100644 index 0000000..89d0b7d --- /dev/null +++ b/backend/Dockerfile @@ -0,0 +1,20 @@ +FROM python:3.11-slim + +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 + +WORKDIR /app + +RUN apt-get update && apt-get install -y --no-install-recommends \ + curl \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +COPY app ./app + +EXPOSE 8000 + +CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/backend/app/api/auth.py b/backend/app/api/auth.py new file mode 100644 index 0000000..874cd15 --- /dev/null +++ b/backend/app/api/auth.py @@ -0,0 +1,98 @@ +import os +from datetime import datetime, timedelta, timezone +from typing import Optional + +from fastapi import APIRouter, Depends, HTTPException, Request, Response, status +from pydantic import BaseModel +from sqlalchemy.orm import Session as DBSession +from sqlalchemy import select + +from app.core.config import settings +from app.core.security import verify_password +from app.db.session import get_db +from app.db.models import User, Session + +router = APIRouter() + +COOKIE_NAME = os.getenv("SESSION_COOKIE_NAME", "ai_chat_session") +SESSION_TTL_HOURS = int(os.getenv("SESSION_TTL_HOURS", "168")) + +class LoginRequest(BaseModel): + login: str + password: str + +class LoginResponse(BaseModel): + status: str + +class MeResponse(BaseModel): + login: str + +@router.post("/login", response_model=LoginResponse) +def login(login_data: LoginRequest, response: Response, db: DBSession = Depends(get_db)): + user = 
db.scalar(select(User).where(User.login == login_data.login)) + if not user or not user.is_active: + raise HTTPException(status_code=401, detail="Invalid credentials or inactive user") + + if not verify_password(login_data.password, user.hashed_password): + raise HTTPException(status_code=401, detail="Invalid credentials or inactive user") + + # Create session + expires = datetime.now(timezone.utc) + timedelta(hours=SESSION_TTL_HOURS) + # Strip timezone for naive datetime storage if DB expects it, depending on pg setup. Let's use naive UTC + expires_naive = expires.replace(tzinfo=None) + + db_session = Session( + user_id=user.id, + expires_at=expires_naive + ) + db.add(db_session) + db.commit() + db.refresh(db_session) + + # Set cookie + is_secure = os.getenv("SESSION_COOKIE_SECURE", "false").lower() == "true" + samesite = os.getenv("SESSION_COOKIE_SAMESITE", "lax").lower() + + response.set_cookie( + key=COOKIE_NAME, + value=db_session.id, + httponly=True, + secure=is_secure, + samesite=samesite, + max_age=SESSION_TTL_HOURS * 3600 + ) + + return {"status": "ok"} + +@router.post("/logout", response_model=LoginResponse) +def logout(request: Request, response: Response, db: DBSession = Depends(get_db)): + session_id = request.cookies.get(COOKIE_NAME) + if session_id: + db_session = db.get(Session, session_id) + if db_session: + db.delete(db_session) + db.commit() + + response.delete_cookie(key=COOKIE_NAME) + return {"status": "ok"} + +@router.get("/me", response_model=MeResponse) +def me(request: Request, db: DBSession = Depends(get_db)): + session_id = request.cookies.get(COOKIE_NAME) + if not session_id: + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Not authenticated") + + db_session = db.get(Session, session_id) + if not db_session: + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid session") + + if db_session.expires_at < datetime.now(timezone.utc).replace(tzinfo=None): + db.delete(db_session) + db.commit() + 
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Session expired") + + user = db_session.user + if not user or not user.is_active: + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="User inactive") + + return {"login": user.login} diff --git a/backend/app/api/chats.py b/backend/app/api/chats.py new file mode 100644 index 0000000..e37367e --- /dev/null +++ b/backend/app/api/chats.py @@ -0,0 +1,283 @@ +from typing import List, Optional +from datetime import datetime +from fastapi import APIRouter, Depends, HTTPException, status +from pydantic import BaseModel +from sqlalchemy.orm import Session as DBSession +from sqlalchemy import select, desc + +from app.db.session import get_db +from app.db.models import User, Chat, Message +from app.api.deps import get_current_user +from app.core.models_catalog import AVAILABLE_MODELS, ModelInfo +import logging +import sys + +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) +if not logger.handlers: + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')) + logger.addHandler(handler) + +router = APIRouter() + +class ChatCreateRequest(BaseModel): + title: str = "New Chat" + model_alias: str + +class ChatResponse(BaseModel): + id: str + title: str + model_alias: str + created_at: datetime + updated_at: datetime + +class MessageCreateRequest(BaseModel): + content: str + role: str = "user" + +class MessageResponse(BaseModel): + id: str + role: str + content: str + created_at: datetime + +@router.get("/models", response_model=List[ModelInfo]) +def get_models(user: User = Depends(get_current_user)): + return AVAILABLE_MODELS + +@router.post("/chats", response_model=ChatResponse) +def create_chat( + req: ChatCreateRequest, + db: DBSession = Depends(get_db), + user: User = Depends(get_current_user) +): + valid_aliases = {m.alias for m in AVAILABLE_MODELS} + if req.model_alias not in 
valid_aliases:
+        raise HTTPException(status_code=400, detail="Invalid model alias")
+
+    chat = Chat(user_id=user.id, title=req.title, model_alias=req.model_alias)
+    db.add(chat)
+    db.commit()
+    db.refresh(chat)
+
+    return chat
+
+@router.get("/chats", response_model=List[ChatResponse])
+def list_chats(
+    db: DBSession = Depends(get_db),
+    user: User = Depends(get_current_user)
+):
+    # Most recently active chats first.
+    stmt = select(Chat).where(Chat.user_id == user.id).order_by(desc(Chat.updated_at))
+    chats = db.scalars(stmt).all()
+    return chats
+
+@router.get("/chats/{chat_id}", response_model=ChatResponse)
+def get_chat(
+    chat_id: str,
+    db: DBSession = Depends(get_db),
+    user: User = Depends(get_current_user)
+):
+    # 404 (not 403) for another user's chat so foreign chat ids are not leaked.
+    chat = db.get(Chat, chat_id)
+    if not chat or chat.user_id != user.id:
+        raise HTTPException(status_code=404, detail="Chat not found")
+    return chat
+
+@router.delete("/chats/{chat_id}")
+def delete_chat(
+    chat_id: str,
+    db: DBSession = Depends(get_db),
+    user: User = Depends(get_current_user)
+):
+    chat = db.get(Chat, chat_id)
+    if not chat or chat.user_id != user.id:
+        raise HTTPException(status_code=404, detail="Chat not found")
+
+    # Messages/attachments are removed via the ORM cascade declared on Chat.
+    db.delete(chat)
+    db.commit()
+    return {"status": "ok"}
+
+@router.get("/chats/{chat_id}/messages", response_model=List[MessageResponse])
+def list_messages(
+    chat_id: str,
+    db: DBSession = Depends(get_db),
+    user: User = Depends(get_current_user)
+):
+    chat = db.get(Chat, chat_id)
+    if not chat or chat.user_id != user.id:
+        raise HTTPException(status_code=404, detail="Chat not found")
+
+    stmt = select(Message).where(Message.chat_id == chat_id).order_by(Message.created_at)
+    messages = db.scalars(stmt).all()
+    return messages
+
+from app.core.llm_client import llm_client, inference_lock
+
+def sanitize_llm_text(raw_text: Optional[str]) -> Optional[str]:
+    """Normalize raw LLM output; return None when nothing usable remains.
+
+    Strips surrounding whitespace and removes <think>/</think> reasoning
+    markers so only the final answer text is kept.
+    """
+    if not raw_text:
+        return None
+    text = raw_text.strip()
+    if not text:
+        return None
+
+    # BUGFIX: the previous chained .replace("", "") calls replaced the empty
+    # string and were therefore no-ops. The intent — confirmed by the
+    # reasoning_content handling and the retry prompt that tells the model to
+    # omit reasoning/XML markup — is to strip thinking-model tags.
+    cleaned = text.replace("<think>", "").replace("</think>", "").strip()
+    if not cleaned:
+        return None
+
+    return cleaned
+ +def normalize_llm_response(content: str, reasoning: str) -> Optional[str]: + c_sanitized = sanitize_llm_text(content) + if c_sanitized: + return c_sanitized + + r_sanitized = sanitize_llm_text(reasoning) + if r_sanitized: + return r_sanitized + + return None + +@router.post("/chats/{chat_id}/messages", response_model=List[MessageResponse]) +async def add_message( + chat_id: str, + req: MessageCreateRequest, + db: DBSession = Depends(get_db), + user: User = Depends(get_current_user) +): + chat = db.get(Chat, chat_id) + if not chat or chat.user_id != user.id: + raise HTTPException(status_code=404, detail="Chat not found") + + # 1. Save user message + user_msg = Message(chat_id=chat.id, role=req.role, content=req.content) + db.add(user_msg) + + from datetime import datetime, timezone + chat.updated_at = datetime.now(timezone.utc).replace(tzinfo=None) + db.add(chat) + db.commit() + db.refresh(user_msg) + + logger.info(f"User message saved for chat {chat.id}. Selected model: {chat.model_alias}") + + # 2. Fetch recent chat history to assemble prompt + # Get last 20 messages + stmt = select(Message).where(Message.chat_id == chat_id).order_by(desc(Message.created_at)).limit(20) + recent_msgs = db.scalars(stmt).all() + recent_msgs.reverse() + + llm_history = [] + for m in recent_msgs: + llm_history.append({"role": m.role, "content": m.content}) + + # 3. Enter Critical Section for LLM Switch and Inference + ai_response = None + final_content = None + async with inference_lock: + try: + status_data = await llm_client.get_status() + current_model = status_data.get("active_model") + logger.info(f"Current active llm-manager model: {current_model}") + + # Switch if needed + switched = (current_model != chat.model_alias) + if switched: + logger.info(f"Switching model to {chat.model_alias}... (switch requested)") + await llm_client.switch_model(chat.model_alias) + logger.info(f"Successfully requested switch to {chat.model_alias}. 
Waiting for readiness...") + + # Wait for readiness + is_ready, iterations, final_status = await llm_client.wait_for_model_ready( + model_name=chat.model_alias, + timeout=60.0, + poll_interval=2.0 + ) + + if not is_ready: + logger.error(f"Readiness timeout for {chat.model_alias} after {iterations} iterations. Final status: {final_status}") + raise HTTPException(status_code=504, detail=f"LLM Manager readiness timeout for {chat.model_alias}") + + logger.info(f"Model {chat.model_alias} is ready after {iterations} iterations. Final status before completion: {final_status}") + + async def do_completion(msgs, max_tok=None, temp=None): + try: + return await llm_client.chat_completion(messages=msgs, max_tokens=max_tok, temperature=temp) + except HTTPException as e: + if e.status_code == 502 or "503" in str(e.detail): + logger.warning("Generation failed (possibly 503 unloading). Retrying switch and completion...") + await llm_client.switch_model(chat.model_alias) + return await llm_client.chat_completion(messages=msgs, max_tokens=max_tok, temperature=temp) + raise e + + # Call inference (Attempt 1) + logger.info("Starting chat completion (Attempt 1)...") + ai_response = await do_completion(llm_history) + + # Parse Attempt 1 + ai_choice = ai_response.get("choices", [{}])[0].get("message", {}) + ai_content_raw = ai_choice.get("content", "") or "" + ai_reasoning_raw = ai_choice.get("reasoning_content", "") or "" + + c_san = sanitize_llm_text(ai_content_raw) + r_san = sanitize_llm_text(ai_reasoning_raw) + final_content = normalize_llm_response(ai_content_raw, ai_reasoning_raw) + + logger.info( + f"LLM Stats (Attempt 1) | model: {chat.model_alias} | " + f"switched: {switched} | " + f"content_raw_len: {len(ai_content_raw)} | reasoning_raw_len: {len(ai_reasoning_raw)} | " + f"content_san_len: {len(c_san) if c_san else 0} | reasoning_san_len: {len(r_san) if r_san else 0}" + ) + + if not final_content: + logger.warning("Attempt 1 rejected: invalid response (both sanitized texts are 
empty). Triggering controlled retry.") + + retry_history = list(llm_history) + retry_history.append({ + "role": "user", + "content": "Ответь сразу финальным текстом. Не выводи reasoning, chain-of-thought, XML-теги или служебную разметку." + }) + + logger.info("Starting chat completion (Attempt 2 - Retry) with max_tokens=2048 and temperature=0.1...") + ai_response_retry = await do_completion(retry_history, max_tok=2048, temp=0.1) + + ai_choice_r = ai_response_retry.get("choices", [{}])[0].get("message", {}) + ai_content_r_raw = ai_choice_r.get("content", "") or "" + ai_reasoning_r_raw = ai_choice_r.get("reasoning_content", "") or "" + + c_san_r = sanitize_llm_text(ai_content_r_raw) + r_san_r = sanitize_llm_text(ai_reasoning_r_raw) + final_content = normalize_llm_response(ai_content_r_raw, ai_reasoning_r_raw) + + logger.info( + f"LLM Stats (Attempt 2 - Retry) | model: {chat.model_alias} | " + f"content_raw_len: {len(ai_content_r_raw)} | reasoning_raw_len: {len(ai_reasoning_r_raw)} | " + f"content_san_len: {len(c_san_r) if c_san_r else 0} | reasoning_san_len: {len(r_san_r) if r_san_r else 0}" + ) + + if not final_content: + logger.error("Attempt 2 also failed to produce valid output. Aborting.") + raise HTTPException(status_code=500, detail="LLM failed to produce valid output after retry.") + else: + logger.info("Attempt 2 succeeded in producing valid output.") + else: + if not ai_content_raw.strip() and final_content: + logger.info("Fallback to reasoning_content was chosen because 'content' was empty (Attempt 1).") + + except HTTPException: + raise + except Exception as e: + logger.error(f"Inference pipeline failed: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + # 5. 
Save AI message + assistant_msg = Message(chat_id=chat.id, role="assistant", content=final_content) + db.add(assistant_msg) + + chat.updated_at = datetime.now(timezone.utc).replace(tzinfo=None) + db.add(chat) + db.commit() + db.refresh(assistant_msg) + logger.info("Assistant message saved successfully.") + + return [user_msg, assistant_msg] diff --git a/backend/app/api/deps.py b/backend/app/api/deps.py new file mode 100644 index 0000000..4cdd6c0 --- /dev/null +++ b/backend/app/api/deps.py @@ -0,0 +1,30 @@ +import os +from datetime import datetime, timezone + +from fastapi import Depends, HTTPException, Request, status +from sqlalchemy.orm import Session as DBSession + +from app.db.session import get_db +from app.db.models import Session, User + +COOKIE_NAME = os.getenv("SESSION_COOKIE_NAME", "ai_chat_session") + +def get_current_user(request: Request, db: DBSession = Depends(get_db)) -> User: + session_id = request.cookies.get(COOKIE_NAME) + if not session_id: + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Not authenticated") + + db_session = db.get(Session, session_id) + if not db_session: + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid session") + + if db_session.expires_at < datetime.now(timezone.utc).replace(tzinfo=None): + db.delete(db_session) + db.commit() + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Session expired") + + user = db_session.user + if not user or not user.is_active: + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="User inactive") + + return user diff --git a/backend/app/api/health.py b/backend/app/api/health.py new file mode 100644 index 0000000..50a474f --- /dev/null +++ b/backend/app/api/health.py @@ -0,0 +1,20 @@ +from fastapi import APIRouter +from sqlalchemy import text +from sqlalchemy import create_engine + +from app.core.config import settings + +router = APIRouter() + + +@router.get("/health") +def health() -> dict: + return 
{"status": "ok", "service": "backend"}
+
+
+@router.get("/ready")
+def ready() -> dict:
+    """Readiness probe: verify the database accepts a trivial query."""
+    # BUGFIX: a throwaway engine is created on every call; dispose it in a
+    # finally block so repeated probes do not leak a connection pool each
+    # time (the original never disposed the engine).
+    engine = create_engine(settings.database_url, pool_pre_ping=True)
+    try:
+        with engine.connect() as conn:
+            conn.execute(text("SELECT 1"))
+    finally:
+        engine.dispose()
+    return {"status": "ready", "database": "ok"}
diff --git a/backend/app/core/config.py b/backend/app/core/config.py
new file mode 100644
index 0000000..ddf3eed
--- /dev/null
+++ b/backend/app/core/config.py
@@ -0,0 +1,39 @@
+from pydantic_settings import BaseSettings, SettingsConfigDict
+
+
+class Settings(BaseSettings):
+    # Ignore unrelated variables from the shared .env file.
+    model_config = SettingsConfigDict(extra="ignore")
+
+    app_env: str = "dev"
+    app_host: str = "0.0.0.0"
+    app_port: int = 8000
+
+    # Required: no default, startup fails fast when unset.
+    database_url: str
+
+    admin_bootstrap_login: str = "admin"
+    admin_bootstrap_password: str = "change_me_later"
+
+    llm_manager_base_url: str
+    llm_manager_api_key: str
+    searxng_base_url: str
+
+    upload_root: str = "/data/uploads"
+    temp_root: str = "/data/temp"
+    log_root: str = "/data/logs"
+
+    max_image_mb: int = 10
+    max_audio_mb: int = 25
+    max_audio_duration_sec: int = 300
+    max_message_chars: int = 16000
+
+    tts_ttl_hours: int = 4
+    temp_audio_ttl_hours: int = 24
+    orphan_file_grace_hours: int = 24
+
+    summary_trigger_message_count: int = 30
+    summary_keep_recent_messages: int = 16
+    summary_max_chars: int = 8000
+    summary_model_alias: str = "qwen3.5-4b"
+
+
+settings = Settings()
diff --git a/backend/app/core/llm_client.py b/backend/app/core/llm_client.py
new file mode 100644
index 0000000..354a6fa
--- /dev/null
+++ b/backend/app/core/llm_client.py
@@ -0,0 +1,98 @@
+import httpx
+import asyncio
+from fastapi import HTTPException
+from app.core.config import settings
+import logging
+
+logger = logging.getLogger(__name__)
+
+# Global lock to prevent concurrent switches and generation requests
+# This is safe for a single-worker MVP (uvicorn without --workers)
+inference_lock = asyncio.Lock()
+
+class LLMClient:
+    def __init__(self):
+        self.base_url = settings.llm_manager_base_url.rstrip("/")
+
self.api_key = settings.llm_manager_api_key
+        self.headers = {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json"
+        }
+
+    async def get_status(self):
+        """Fetch the current global state of llm-manager."""
+        async with httpx.AsyncClient() as client:
+            try:
+                response = await client.get(
+                    f"{self.base_url}/status",
+                    headers=self.headers,
+                    timeout=10.0
+                )
+                response.raise_for_status()
+                return response.json()
+            except httpx.HTTPError as e:
+                logger.error(f"Failed to fetch llm-manager status: {e}")
+                raise HTTPException(status_code=502, detail="llm-manager status check failed")
+
+    async def switch_model(self, model_name: str):
+        """Request llm-manager to switch its active model."""
+        async with httpx.AsyncClient() as client:
+            try:
+                logger.info(f"Requesting llm-manager switch to model: {model_name}")
+                response = await client.post(
+                    f"{self.base_url}/switch/{model_name}",
+                    headers=self.headers,
+                    timeout=60.0  # Switching can take a while via LLM manager
+                )
+                response.raise_for_status()
+                return response.json()
+            except httpx.HTTPError as e:
+                logger.error(f"Failed to switch model to {model_name}: {e}")
+                raise HTTPException(status_code=502, detail=f"Failed to switch model to {model_name}")
+
+    async def wait_for_model_ready(self, model_name: str, timeout: float = 60.0, poll_interval: float = 2.0):
+        """Poll llm-manager until the model is active and not loading/unloading.
+
+        Returns a tuple (is_ready, poll_iterations, last_status_or_None).
+        """
+        import time
+        # BUGFIX: use a monotonic clock for the deadline; time.time() is wall
+        # clock and can jump (NTP/DST adjustments), breaking the timeout.
+        start_time = time.monotonic()
+        iterations = 0
+        while time.monotonic() - start_time < timeout:
+            iterations += 1
+            status = await self.get_status()
+            current_model = status.get("active_model")
+            vram_state = status.get("vram_state", "")
+
+            logger.info(f"Readiness poll #{iterations}: model={current_model}, vram_state={vram_state}")
+
+            if current_model == model_name and vram_state not in ("loading", "unloading"):
+                return True, iterations, status
+
+            await asyncio.sleep(poll_interval)
+
+        return False, iterations, None
+
+    async def chat_completion(self, messages: 
list, max_tokens: int = None, temperature: float = None): + """Generate response via llm-manager.""" + async with httpx.AsyncClient() as client: + try: + payload = { + "messages": messages, + "stream": False + } + if max_tokens is not None: + payload["max_tokens"] = max_tokens + if temperature is not None: + payload["temperature"] = temperature + + response = await client.post( + f"{self.base_url}/v1/chat/completions", + headers=self.headers, + json=payload, + timeout=120.0 + ) + response.raise_for_status() + return response.json() + except httpx.HTTPError as e: + logger.error(f"Failed to generate chat completion: {e}") + raise HTTPException(status_code=502, detail="Chat completion generation failed") + +llm_client = LLMClient() diff --git a/backend/app/core/models_catalog.py b/backend/app/core/models_catalog.py new file mode 100644 index 0000000..c36a831 --- /dev/null +++ b/backend/app/core/models_catalog.py @@ -0,0 +1,15 @@ +from typing import Optional +from pydantic import BaseModel + +class ModelInfo(BaseModel): + alias: str + name: str + vision_alias: Optional[str] = None + +# Defined curated list avoiding direct LLM integration dynamically +AVAILABLE_MODELS = [ + ModelInfo(alias="qwen3.5-4b", name="Qwen 3.5 4B", vision_alias="qwen3.5-4b-vl"), + ModelInfo(alias="qwen3.5-9b", name="Qwen 3.5 9B", vision_alias="qwen3.5-9b-vl"), + ModelInfo(alias="qwen2.5-coder-14b", name="Qwen 2.5 Coder 14B"), + ModelInfo(alias="a-vibe", name="A-Vibe"), +] diff --git a/backend/app/core/security.py b/backend/app/core/security.py new file mode 100644 index 0000000..13a7270 --- /dev/null +++ b/backend/app/core/security.py @@ -0,0 +1,13 @@ +from argon2 import PasswordHasher +from argon2.exceptions import VerifyMismatchError + +ph = PasswordHasher() + +def verify_password(plain_password: str, hashed_password: str) -> bool: + try: + return ph.verify(hashed_password, plain_password) + except VerifyMismatchError: + return False + +def get_password_hash(password: str) -> str: + return 
ph.hash(password) diff --git a/backend/app/db/base_class.py b/backend/app/db/base_class.py new file mode 100644 index 0000000..59be703 --- /dev/null +++ b/backend/app/db/base_class.py @@ -0,0 +1,3 @@ +from sqlalchemy.orm import declarative_base + +Base = declarative_base() diff --git a/backend/app/db/init_db.py b/backend/app/db/init_db.py new file mode 100644 index 0000000..1f59138 --- /dev/null +++ b/backend/app/db/init_db.py @@ -0,0 +1,37 @@ +import logging + +from sqlalchemy.orm import Session +from sqlalchemy import select + +from app.core.config import settings +from app.core.security import get_password_hash +from app.db.base_class import Base +from app.db.session import engine +from app.db.models import User + +logger = logging.getLogger(__name__) + +def init_db(db: Session) -> None: + # MVP: create tables if they don't exist + Base.metadata.create_all(bind=engine) + + bootstrap_admin(db) + +def bootstrap_admin(db: Session) -> None: + admin_login = settings.admin_bootstrap_login + admin_pass = settings.admin_bootstrap_password + + user = db.scalar(select(User).where(User.login == admin_login)) + if not user: + logger.info(f"Creating bootstrap admin user: {admin_login}") + hashed_password = get_password_hash(admin_pass) + admin_user = User( + login=admin_login, + hashed_password=hashed_password, + is_active=True, + ) + db.add(admin_user) + db.commit() + db.refresh(admin_user) + else: + logger.info(f"Admin user {admin_login} already exists") diff --git a/backend/app/db/models.py b/backend/app/db/models.py new file mode 100644 index 0000000..48f8623 --- /dev/null +++ b/backend/app/db/models.py @@ -0,0 +1,70 @@ +from datetime import datetime, timezone +import uuid + +from sqlalchemy import Column, String, Boolean, DateTime, ForeignKey, Text +from sqlalchemy.orm import relationship + +from app.db.base_class import Base + +def generate_uuid() -> str: + return uuid.uuid4().hex + +def utc_now() -> datetime: + return datetime.now(timezone.utc).replace(tzinfo=None) + 
+class User(Base): + __tablename__ = "users" + + id = Column(String, primary_key=True, index=True, default=generate_uuid) + login = Column(String, unique=True, index=True, nullable=False) + hashed_password = Column(String, nullable=False) + is_active = Column(Boolean, default=True) + + sessions = relationship("Session", back_populates="user", cascade="all, delete-orphan") + chats = relationship("Chat", back_populates="user", cascade="all, delete-orphan") + +class Session(Base): + __tablename__ = "sessions" + + id = Column(String, primary_key=True, index=True, default=generate_uuid) + user_id = Column(String, ForeignKey("users.id"), nullable=False, index=True) + expires_at = Column(DateTime, nullable=False) + + user = relationship("User", back_populates="sessions") + +class Chat(Base): + __tablename__ = "chats" + + id = Column(String, primary_key=True, index=True, default=generate_uuid) + user_id = Column(String, ForeignKey("users.id"), nullable=False, index=True) + title = Column(String, nullable=False, default="New Chat") + model_alias = Column(String, nullable=False) + created_at = Column(DateTime, nullable=False, default=utc_now) + updated_at = Column(DateTime, nullable=False, default=utc_now, onupdate=utc_now) + + user = relationship("User", back_populates="chats") + messages = relationship("Message", back_populates="chat", cascade="all, delete-orphan", order_by="Message.created_at") + +class Message(Base): + __tablename__ = "messages" + + id = Column(String, primary_key=True, index=True, default=generate_uuid) + chat_id = Column(String, ForeignKey("chats.id"), nullable=False, index=True) + role = Column(String, nullable=False) # system, user, assistant + content = Column(Text, nullable=False) + created_at = Column(DateTime, nullable=False, default=utc_now) + + chat = relationship("Chat", back_populates="messages") + attachments = relationship("Attachment", back_populates="message", cascade="all, delete-orphan") + +class Attachment(Base): + __tablename__ = 
"attachments" + + id = Column(String, primary_key=True, index=True, default=generate_uuid) + message_id = Column(String, ForeignKey("messages.id"), nullable=False, index=True) + filename = Column(String, nullable=False) + content_type = Column(String, nullable=False) + file_path = Column(String, nullable=False) + created_at = Column(DateTime, nullable=False, default=utc_now) + + message = relationship("Message", back_populates="attachments") diff --git a/backend/app/db/session.py b/backend/app/db/session.py new file mode 100644 index 0000000..a7b24b3 --- /dev/null +++ b/backend/app/db/session.py @@ -0,0 +1,14 @@ +from sqlalchemy import create_engine +from sqlalchemy.orm import sessionmaker + +from app.core.config import settings + +engine = create_engine(settings.database_url, pool_pre_ping=True) +SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) + +def get_db(): + db = SessionLocal() + try: + yield db + finally: + db.close() diff --git a/backend/app/main.py b/backend/app/main.py new file mode 100644 index 0000000..c278fe6 --- /dev/null +++ b/backend/app/main.py @@ -0,0 +1,59 @@ +from pathlib import Path + +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware + +from app.api.health import router as health_router +from app.api.auth import router as auth_router +from app.api.chats import router as chats_router +from app.core.config import settings +from app.db.init_db import init_db +from app.db.session import SessionLocal + + +def ensure_dirs() -> None: + for path in [settings.upload_root, settings.temp_root, settings.log_root]: + Path(path).mkdir(parents=True, exist_ok=True) + + +ensure_dirs() + +app = FastAPI( + title="AI Chat MVP Backend", + version="0.1.0", +) + + +@app.on_event("startup") +def on_startup(): + db = SessionLocal() + try: + init_db(db) + finally: + db.close() + + +app.add_middleware( + CORSMiddleware, + allow_origins=[ + "http://127.0.0.1:13000", + "http://localhost:13000", + 
"http://192.168.149.194:13000", + ], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +app.include_router(health_router, prefix="/api") +app.include_router(auth_router, prefix="/api/auth", tags=["auth"]) +app.include_router(chats_router, prefix="/api", tags=["chats"]) + + +@app.get("/") +def root() -> dict: + return { + "service": "ai-chat-backend", + "env": settings.app_env, + "docs": "/docs", + } diff --git a/backend/requirements.txt b/backend/requirements.txt new file mode 100644 index 0000000..b2aa8fd --- /dev/null +++ b/backend/requirements.txt @@ -0,0 +1,8 @@ +fastapi==0.115.8 +uvicorn[standard]==0.34.0 +pydantic==2.10.6 +pydantic-settings==2.8.1 +sqlalchemy==2.0.38 +psycopg[binary]==3.2.6 +argon2-cffi==23.1.0 +httpx diff --git a/cookies.txt b/cookies.txt new file mode 100644 index 0000000..bbb3ce1 --- /dev/null +++ b/cookies.txt @@ -0,0 +1,5 @@ +# Netscape HTTP Cookie File +# https://curl.se/docs/http-cookies.html +# This file was generated by libcurl! Edit at your own risk. 
+ +#HttpOnly_127.0.0.1 FALSE / FALSE 1773687247 ai_chat_session 6a30ee77ead843569d08f2d47dda4b60 diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..6b7bf79 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,60 @@ +services: + postgres: + image: postgres:16 + container_name: ai-chat-postgres + restart: unless-stopped + environment: + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_USER: ${POSTGRES_USER} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + volumes: + - postgres_data:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"] + interval: 5s + timeout: 5s + retries: 20 + + backend: + build: + context: ./backend + container_name: ai-chat-backend + restart: unless-stopped + env_file: + - .env + environment: + APP_HOST: 0.0.0.0 + APP_PORT: 8000 + DATABASE_URL: postgresql+psycopg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB} + depends_on: + postgres: + condition: service_healthy + volumes: + - app_uploads:/data/uploads + - app_temp:/data/temp + - app_logs:/data/logs + ports: + - "18000:8000" + + frontend: + build: + context: ./frontend + args: + NEXT_PUBLIC_API_BASE_URL: ${NEXT_PUBLIC_API_BASE_URL} + container_name: ai-chat-frontend + restart: unless-stopped + env_file: + - .env + environment: + NEXT_PUBLIC_API_BASE_URL: ${NEXT_PUBLIC_API_BASE_URL} + INTERNAL_API_URL: http://backend:8000 + depends_on: + - backend + ports: + - "13000:3000" + +volumes: + postgres_data: + app_uploads: + app_temp: + app_logs: diff --git a/docs/ui-mockups/01_login.jpg b/docs/ui-mockups/01_login.jpg new file mode 100644 index 0000000..b609693 Binary files /dev/null and b/docs/ui-mockups/01_login.jpg differ diff --git a/docs/ui-mockups/02_main-chat selection.jpg b/docs/ui-mockups/02_main-chat selection.jpg new file mode 100644 index 0000000..72695e8 Binary files /dev/null and b/docs/ui-mockups/02_main-chat selection.jpg differ diff --git a/docs/ui-mockups/03_new chat creation.jpg 
b/docs/ui-mockups/03_new chat creation.jpg new file mode 100644 index 0000000..bfe8cee Binary files /dev/null and b/docs/ui-mockups/03_new chat creation.jpg differ diff --git a/docs/ui-mockups/04_chat - text only.jpg b/docs/ui-mockups/04_chat - text only.jpg new file mode 100644 index 0000000..4f78510 Binary files /dev/null and b/docs/ui-mockups/04_chat - text only.jpg differ diff --git a/docs/ui-mockups/05_chat - text and picture.jpg b/docs/ui-mockups/05_chat - text and picture.jpg new file mode 100644 index 0000000..cdeeadf Binary files /dev/null and b/docs/ui-mockups/05_chat - text and picture.jpg differ diff --git a/docs/ui-mockups/06_chat - audio input.jpg b/docs/ui-mockups/06_chat - audio input.jpg new file mode 100644 index 0000000..999956d Binary files /dev/null and b/docs/ui-mockups/06_chat - audio input.jpg differ diff --git a/docs/ui-mockups/07_caht - TTS output.jpg b/docs/ui-mockups/07_caht - TTS output.jpg new file mode 100644 index 0000000..df48aa9 Binary files /dev/null and b/docs/ui-mockups/07_caht - TTS output.jpg differ diff --git a/docs/ui-mockups/08_chat - empty chat new.jpg b/docs/ui-mockups/08_chat - empty chat new.jpg new file mode 100644 index 0000000..65de42b Binary files /dev/null and b/docs/ui-mockups/08_chat - empty chat new.jpg differ diff --git a/docs/ui-mockups/09_chat - error - not available.jpg b/docs/ui-mockups/09_chat - error - not available.jpg new file mode 100644 index 0000000..1d0df99 Binary files /dev/null and b/docs/ui-mockups/09_chat - error - not available.jpg differ diff --git a/docs/ui-mockups/10 user admin.jpg b/docs/ui-mockups/10 user admin.jpg new file mode 100644 index 0000000..0dcc5dd Binary files /dev/null and b/docs/ui-mockups/10 user admin.jpg differ diff --git a/docs/ui-mockups/README.md b/docs/ui-mockups/README.md new file mode 100644 index 0000000..c90bc00 --- /dev/null +++ b/docs/ui-mockups/README.md @@ -0,0 +1,17 @@ +# UI Mockups + +Референсные экраны для реализации frontend MVP. + +## Список экранов +1. 
01-login.jpg — экран логина +2. 02-main-chat-selection.jpg — главная / список чатов +3. 03-new-chat-creation.jpg — создание нового чата +4. 04-chat-text-only.jpg — текстовый чат +5. 05-chat-text-and-picture.jpg — чат с картинкой +6. 06-chat-audio-input.jpg — голосовой ввод / аудио +7. 07-chat-tts-output.jpg — ответ с TTS +8. 08-chat-empty-new.jpg — пустое состояние +9. 09-chat-error-not-available.jpg — ошибка сервиса/модели +10. 10-user-admin.jpg — базовая админка пользователей + +Использовать как визуальный референс. Не воспринимать как pixel-perfect спецификацию. diff --git a/frontend/Dockerfile b/frontend/Dockerfile new file mode 100644 index 0000000..c26d057 --- /dev/null +++ b/frontend/Dockerfile @@ -0,0 +1,17 @@ +FROM node:20-alpine + +WORKDIR /app + +COPY package.json package-lock.json* ./ +RUN npm install + +COPY . . + +ARG NEXT_PUBLIC_API_BASE_URL +ENV NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL + +RUN npm run build + +EXPOSE 3000 + +CMD ["npm", "start"] diff --git a/frontend/app/chat/[id]/ChatView.tsx b/frontend/app/chat/[id]/ChatView.tsx new file mode 100644 index 0000000..3afc0aa --- /dev/null +++ b/frontend/app/chat/[id]/ChatView.tsx @@ -0,0 +1,248 @@ +"use client"; + +import { useState, useRef, useEffect } from "react"; + +interface Message { + id: string; + role: string; + content: string; + created_at: string; +} + +interface Chat { + id: string; + title: string; + model_alias: string; +} + +const API_BASE_URL = process.env.NEXT_PUBLIC_API_BASE_URL || "http://127.0.0.1:18000"; + +export default function ChatView({ chat, initialMessages }: { chat: Chat, initialMessages: Message[] }) { + const [messages, setMessages] = useState(initialMessages); + const [input, setInput] = useState(""); + const [isSending, setIsSending] = useState(false); + const [isGenerating, setIsGenerating] = useState(false); + const scrollRef = useRef(null); + + useEffect(() => { + if (scrollRef.current) { + scrollRef.current.scrollTop = scrollRef.current.scrollHeight; + } 
+ }, [messages, isGenerating]); // Also scroll when isGenerating changes + + const handleSend = async (e: React.FormEvent) => { + e.preventDefault(); + if (!input.trim() || isSending || isGenerating) return; + + setIsSending(true); + const content = input; + setInput(""); + + // Eagerly show user message map + const tempUserMsg: Message = { + id: "temp-" + Date.now(), + role: "user", + content: content, + created_at: new Date().toISOString() + }; + setMessages(prev => [...prev, tempUserMsg]); + setIsGenerating(true); + + try { + const res = await fetch(`${API_BASE_URL}/api/chats/${chat.id}/messages`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + credentials: "include", + body: JSON.stringify({ content, role: "user" }), + }); + + if (!res.ok) throw new Error("Failed to send message"); + + const data = await res.json(); + if (!Array.isArray(data) || data.length < 2) { + throw new Error("Invalid response format from server"); + } + const [userMsg, assistantMsg] = data; + + // Replace the temp user msg and append the actual user and assistant messages from backend + setMessages(prev => [...prev.filter(m => m.id !== tempUserMsg.id), userMsg, assistantMsg]); + + } catch (err) { + console.error(err); + alert("Failed to send message: " + err); + setInput(content); // restore input + setMessages(prev => prev.filter(m => m.id !== tempUserMsg.id)); // Remove failed message + } finally { + setIsSending(false); + setIsGenerating(false); + } + }; + + return ( +
+ {/* Dynamic Chat Header */} +
+

{chat.title}

+
+ + + + + + Модель: {chat.model_alias} +
+
+ + {/* Messages Scroll Area */} +
+ {messages.length === 0 ? ( +
+ Напишите первое сообщение, чтобы начать диалог. +
+ ) : ( +
+ {messages.map(msg => { + const isUser = msg.role === 'user'; + return ( +
+
+ + {isUser ? 'Вы' : 'AI Помощник'} + + + {new Date(msg.created_at).toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })} + +
+
+ {msg.content} +
+
+ ); + })} + + {/* Loading Indicator for AI Response */} + {isGenerating && ( +
+
+ + AI Помощник + +
+
+ Печатает... +
+
+ )} + +
+ )} +
+ + {/* Composer */} +
+
+ setInput(e.target.value)} + placeholder="Спросите о чем-нибудь..." + style={{ + flex: 1, + padding: '14px 110px 14px 20px', + borderRadius: 24, + border: "1px solid var(--border-color)", + backgroundColor: "var(--bg-color)", + fontSize: 15, + outline: "none", + transition: "border-color 0.2s" + }} + disabled={isSending} + onFocus={e => e.target.style.borderColor = 'var(--primary)'} + onBlur={e => e.target.style.borderColor = 'var(--border-color)'} + /> + +
+ + +
+
+
+
+ ); +} diff --git a/frontend/app/chat/[id]/page.tsx b/frontend/app/chat/[id]/page.tsx new file mode 100644 index 0000000..167f3ae --- /dev/null +++ b/frontend/app/chat/[id]/page.tsx @@ -0,0 +1,115 @@ +import { cookies } from "next/headers"; +import { redirect } from "next/navigation"; +import Link from "next/link"; +import ChatView from "./ChatView"; +import Layout from "../../../components/Layout"; + +async function getAuthState() { + const baseUrl = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_BASE_URL || "http://127.0.0.1:18000"; + const cookieStore = await cookies(); + const sessionCookie = cookieStore.get("ai_chat_session"); + + if (!sessionCookie) return null; + + try { + const res = await fetch(`${baseUrl}/api/auth/me`, { + headers: { + Cookie: `${sessionCookie.name}=${sessionCookie.value}` + }, + cache: "no-store", + }); + if (!res.ok) return null; + return await res.json(); + } catch { + return null; + } +} + +async function getChat(id: string) { + const baseUrl = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_BASE_URL || "http://127.0.0.1:18000"; + const cookieStore = await cookies(); + const sessionCookie = cookieStore.get("ai_chat_session"); + + if (!sessionCookie) return null; + + try { + const res = await fetch(`${baseUrl}/api/chats/${id}`, { + headers: { + Cookie: `${sessionCookie.name}=${sessionCookie.value}` + }, + cache: "no-store", + }); + if (!res.ok) return null; + return await res.json(); + } catch { + return null; + } +} + +async function getMessages(id: string) { + const baseUrl = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_BASE_URL || "http://127.0.0.1:18000"; + const cookieStore = await cookies(); + const sessionCookie = cookieStore.get("ai_chat_session"); + + if (!sessionCookie) return []; + + try { + const res = await fetch(`${baseUrl}/api/chats/${id}/messages`, { + headers: { + Cookie: `${sessionCookie.name}=${sessionCookie.value}` + }, + cache: "no-store", + }); + if (!res.ok) return []; + 
return await res.json(); + } catch { + return []; + } +} + +async function getChats() { + const baseUrl = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_BASE_URL || "http://127.0.0.1:18000"; + const cookieStore = await cookies(); + const sessionCookie = cookieStore.get("ai_chat_session"); + + if (!sessionCookie) return []; + + try { + const res = await fetch(`${baseUrl}/api/chats`, { + headers: { Cookie: `${sessionCookie.name}=${sessionCookie.value}` }, + cache: "no-store", + }); + if (!res.ok) return []; + return await res.json(); + } catch { + return []; + } +} + +export default async function ChatPage({ params }: { params: Promise<{ id: string }> }) { + const auth = await getAuthState(); + if (!auth) { + redirect("/login"); + } + + const { id } = await params; + + const chat = await getChat(id); + if (!chat) { + return ( +
+

Chat Not Found

+ Back to Home +
+ ); + } + + const messages = await getMessages(id); + const chats = await getChats(); + + return ( + + + + ); +} diff --git a/frontend/app/globals.css b/frontend/app/globals.css new file mode 100644 index 0000000..f2fb927 --- /dev/null +++ b/frontend/app/globals.css @@ -0,0 +1,36 @@ +@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600&display=swap'); + +:root { + --bg-color: #f5f6f8; + --panel-bg: #ffffff; + --text-main: #333333; + --text-muted: #888888; + --border-color: #e5e7eb; + --primary: #4a76a8; + --primary-hover: #3d628f; + --sidebar-w: 260px; +} + +body, html { + margin: 0; + padding: 0; + font-family: 'Inter', sans-serif; + background-color: var(--bg-color); + color: var(--text-main); + height: 100vh; + box-sizing: border-box; +} + +* { + box-sizing: inherit; +} + +a { + text-decoration: none; + color: inherit; +} + +button { + cursor: pointer; + font-family: 'Inter', sans-serif; +} diff --git a/frontend/app/layout.tsx b/frontend/app/layout.tsx new file mode 100644 index 0000000..6037605 --- /dev/null +++ b/frontend/app/layout.tsx @@ -0,0 +1,18 @@ +import "./globals.css"; + +export const metadata = { + title: "AI Chat MVP", + description: "AI Chat MVP", +}; + +export default function RootLayout({ + children, +}: { + children: React.ReactNode; +}) { + return ( + + {children} + + ); +} diff --git a/frontend/app/login/page.tsx b/frontend/app/login/page.tsx new file mode 100644 index 0000000..7d33644 --- /dev/null +++ b/frontend/app/login/page.tsx @@ -0,0 +1,268 @@ +"use client"; + +import { useState } from "react"; +import { useRouter } from "next/navigation"; + +const API_BASE_URL = + process.env.NEXT_PUBLIC_API_BASE_URL || "http://127.0.0.1:18000"; + +export default function LoginPage() { + const [login, setLogin] = useState(""); + const [password, setPassword] = useState(""); + const [error, setError] = useState(""); + const [showPassword, setShowPassword] = useState(false); + const [isSubmitting, setIsSubmitting] = 
useState(false); + const router = useRouter(); + + const handleSubmit = async (e) => { + e.preventDefault(); + setError(""); + setIsSubmitting(true); + + try { + const res = await fetch(`${API_BASE_URL}/api/auth/login`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + credentials: "include", + body: JSON.stringify({ login, password }), + }); + + if (!res.ok) { + let message = "Неверный логин или пароль"; + try { + const data = await res.json(); + if (data?.detail && typeof data.detail === "string") { + message = data.detail; + } + } catch { + // ignore malformed/non-json body + } + throw new Error(message); + } + + router.push("/"); + router.refresh(); + } catch (err) { + setError(err?.message || "Ошибка входа"); + } finally { + setIsSubmitting(false); + } + }; + + return ( +
+
+ + + + + AI Chat MVP +
+ +
+

+ Вход в систему +

+ +
+
+ + setLogin(e.target.value)} + type="text" + placeholder="admin" + required + style={{ + width: "100%", + padding: "12px 16px", + borderRadius: "8px", + border: "1px solid var(--border-color)", + fontSize: "15px", + outline: "none", + transition: "border-color 0.2s", + }} + onFocus={(e) => (e.target.style.borderColor = "var(--primary)")} + onBlur={(e) => + (e.target.style.borderColor = "var(--border-color)") + } + /> +
+ +
+
+ + +
+ + setPassword(e.target.value)} + type={showPassword ? "text" : "password"} + placeholder="••••••••" + required + style={{ + width: "100%", + padding: "12px 16px", + borderRadius: "8px", + border: "1px solid var(--border-color)", + fontSize: "15px", + outline: "none", + transition: "border-color 0.2s", + }} + onFocus={(e) => (e.target.style.borderColor = "var(--primary)")} + onBlur={(e) => + (e.target.style.borderColor = "var(--border-color)") + } + /> +
+ + {error ? ( +
+ {error} +
+ ) : null} + + +
+ +
+ Вход доступен только для пользователей, созданных администратором. +
+
+
+ ); +} diff --git a/frontend/app/page.tsx b/frontend/app/page.tsx new file mode 100644 index 0000000..b5d1d79 --- /dev/null +++ b/frontend/app/page.tsx @@ -0,0 +1,67 @@ +import { cookies } from "next/headers"; +import { redirect } from "next/navigation"; +import Layout from "../components/Layout"; + +async function getAuthState() { + const baseUrl = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_BASE_URL || "http://127.0.0.1:18000"; + const cookieStore = await cookies(); + const sessionCookie = cookieStore.get("ai_chat_session"); + + if (!sessionCookie) return null; + + try { + const res = await fetch(`${baseUrl}/api/auth/me`, { + headers: { + Cookie: `${sessionCookie.name}=${sessionCookie.value}` + }, + cache: "no-store" + }); + if (!res.ok) return null; + return await res.json(); + } catch { + return null; + } +} + +async function getChats() { + const baseUrl = process.env.INTERNAL_API_URL || process.env.NEXT_PUBLIC_API_BASE_URL || "http://127.0.0.1:18000"; + const cookieStore = await cookies(); + const sessionCookie = cookieStore.get("ai_chat_session"); + + if (!sessionCookie) return []; + + try { + const res = await fetch(`${baseUrl}/api/chats`, { + headers: { + Cookie: `${sessionCookie.name}=${sessionCookie.value}` + }, + cache: "no-store" + }); + if (!res.ok) return []; + return await res.json(); + } catch { + return []; + } +} + +export default async function Home() { + const auth = await getAuthState(); + if (!auth) { + redirect("/login"); + } + + const chats = await getChats(); + + return ( + +
+ + + + +
Чем я могу помочь?
+
Выберите чат в меню слева или создайте новый.
+
+
+ ); +} diff --git a/frontend/components/Layout.tsx b/frontend/components/Layout.tsx new file mode 100644 index 0000000..f2592a5 --- /dev/null +++ b/frontend/components/Layout.tsx @@ -0,0 +1,228 @@ +"use client"; + +import { useState, useEffect } from "react"; +import Link from "next/link"; +import { usePathname, useRouter } from "next/navigation"; + +interface Chat { + id: string; + title: string; + model_alias: string; + updated_at: string; +} + +interface User { + login: string; +} + +const API_BASE_URL = process.env.NEXT_PUBLIC_API_BASE_URL || "http://127.0.0.1:18000"; + +// Global modal state since Sidebar and Topbar both might need logic, but let's keep it simple +export default function Layout({ children, chats: initialChats, user }: { children: React.ReactNode, chats: Chat[], user: User }) { + const [chats, setChats] = useState(initialChats); + const [models, setModels] = useState<{alias: string, name: string}[]>([]); + const pathname = usePathname(); + const router = useRouter(); + + const [isModalOpen, setIsModalOpen] = useState(false); + const [newChatTitle, setNewChatTitle] = useState(""); + const [newChatModel, setNewChatModel] = useState(""); + + useEffect(() => { + fetch(`${API_BASE_URL}/api/models`) + .then(res => res.json()) + .then(data => { + setModels(data); + if (data.length > 0) setNewChatModel(data[0].alias); + }) + .catch(console.error); + }, []); + + const handleLogout = async () => { + await fetch(`${API_BASE_URL}/api/auth/logout`, { method: "POST" }); + window.location.href = "/"; + }; + + const createChat = async (e: React.FormEvent) => { + e.preventDefault(); + if (!newChatTitle.trim() || !newChatModel) return; + + try { + const res = await fetch(`${API_BASE_URL}/api/chats`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + credentials: "include", + body: JSON.stringify({ title: newChatTitle, model_alias: newChatModel }), + }); + if (res.ok) { + const chat = await res.json(); + setChats([chat, ...chats]); + 
setIsModalOpen(false); + setNewChatTitle(""); + router.push(`/chat/${chat.id}`); + } + } catch (err) { + console.error("Failed to create chat", err); + } + }; + + return ( +
+ {/* Top Bar */} +
+
+ AI Chat MVP +
+
+
+
+ {user.login.charAt(0).toUpperCase()} +
+ {user.login} +
+ +
+
+ + {/* Main Container */} +
+ + {/* Sidebar */} + + + {/* Main Content Area */} +
+ {children} +
+
+ + {/* New Chat Modal */} + {isModalOpen && ( +
+
+

Новый чат

+
+
+ + setNewChatTitle(e.target.value)} + placeholder="Введите название..." + required + style={{ width: '100%', padding: '10px 14px', borderRadius: 8, border: '1px solid var(--border-color)', fontSize: 14, outline: 'none' }} + /> +
+
+ + +
+ +
+ + +
+
+
+
+ )} +
+ ); +} diff --git a/frontend/package.json b/frontend/package.json new file mode 100644 index 0000000..9377e7e --- /dev/null +++ b/frontend/package.json @@ -0,0 +1,19 @@ +{ + "name": "ai-chat-mvp-frontend", + "private": true, + "scripts": { + "dev": "next dev -H 0.0.0.0 -p 3000", + "build": "next build", + "start": "next start -p 3000" + }, + "dependencies": { + "next": "15.2.0", + "react": "19.0.0", + "react-dom": "19.0.0" + }, + "devDependencies": { + "@types/node": "^20", + "@types/react": "^19", + "typescript": "^5" + } +}