fix(core): stabilize editor lifecycle, transactional versions, and runtime config

This commit is contained in:
greebo
2026-03-20 12:38:10 +03:00
parent 0f9c2a1cbd
commit 239b32a246
17 changed files with 1224 additions and 457 deletions

View File

@@ -15,19 +15,15 @@ Backend for SVG scheme upload, draft editing, pricing, diagnostics, publish prev
Default backend port: `9020` Default backend port: `9020`
Health check: Health check:
- `GET /healthz` - `GET /healthz`
Main API prefix: Main API prefix:
- `/api/v1` - `/api/v1`
Auth header: Auth header:
- `X-API-Key` - `X-API-Key`
Default local admin key: Default local admin key:
- `admin-local-dev-key` - `admin-local-dev-key`
## Core lifecycle ## Core lifecycle
@@ -254,9 +250,11 @@ on every mutation route.
## Regression ## Regression
Main operator regression: Main operator regressions:
- `backend/scripts/smoke_regression.sh` - `backend/scripts/smoke_regression.sh`
- `backend/scripts/editor_mutation_regression.sh`
Run: Run:
`API_URL=http://127.0.0.1:9020 API_KEY=admin-local-dev-key SCHEME_ID=... ./backend/scripts/smoke_regression.sh` `API_URL=http://127.0.0.1:9020 API_KEY=admin-local-dev-key SCHEME_ID=... ./backend/scripts/smoke_regression.sh`
`API_URL=http://127.0.0.1:9020 API_KEY=admin-local-dev-key SCHEME_ID=... ./backend/scripts/editor_mutation_regression.sh`

View File

@@ -1,4 +1,4 @@
from fastapi import APIRouter, Depends, Query from fastapi import APIRouter, Depends, Query, Request
from app.core.config import settings from app.core.config import settings
from app.repositories.audit import create_audit_event from app.repositories.audit import create_audit_event
@@ -508,6 +508,7 @@ async def delete_draft_group(
@router.patch(f"{settings.api_v1_prefix}/schemes/{{scheme_id}}/draft/seats/records/{{seat_record_id}}", response_model=SeatPatchResponse) @router.patch(f"{settings.api_v1_prefix}/schemes/{{scheme_id}}/draft/seats/records/{{seat_record_id}}", response_model=SeatPatchResponse)
async def patch_draft_seat( async def patch_draft_seat(
request: Request,
scheme_id: str, scheme_id: str,
seat_record_id: str, seat_record_id: str,
payload: SeatPatchRequest, payload: SeatPatchRequest,
@@ -530,14 +531,20 @@ async def patch_draft_seat(
group_id=payload.group_id, group_id=payload.group_id,
) )
raw_json = await request.json()
update_data = {k: v for k, v in payload.model_dump(exclude_unset=True).items() if k in raw_json}
for field in ("seat_id", "sector_id", "group_id"):
if field in update_data and (update_data[field] is None or update_data[field] == ""):
from app.services.api_errors import raise_unprocessable
raise_unprocessable(
code="business_identifier_nullification_forbidden",
message=f"{field} cannot be nullified or explicitly cleared",
)
row = await update_scheme_version_seat_by_record_id( row = await update_scheme_version_seat_by_record_id(
scheme_version_id=version.scheme_version_id, scheme_version_id=version.scheme_version_id,
seat_record_id=seat_record_id, seat_record_id=seat_record_id,
seat_id=payload.seat_id, **update_data,
sector_id=payload.sector_id,
group_id=payload.group_id,
row_label=payload.row_label,
seat_number=payload.seat_number,
) )
await create_audit_event( await create_audit_event(
@@ -569,6 +576,7 @@ async def patch_draft_seat(
@router.post(f"{settings.api_v1_prefix}/schemes/{{scheme_id}}/draft/seats/bulk", response_model=BulkSeatPatchResponse) @router.post(f"{settings.api_v1_prefix}/schemes/{{scheme_id}}/draft/seats/bulk", response_model=BulkSeatPatchResponse)
async def bulk_patch_draft_seats( async def bulk_patch_draft_seats(
request: Request,
scheme_id: str, scheme_id: str,
payload: BulkSeatPatchRequest, payload: BulkSeatPatchRequest,
expected_scheme_version_id: str | None = Query(default=None), expected_scheme_version_id: str | None = Query(default=None),
@@ -579,7 +587,20 @@ async def bulk_patch_draft_seats(
expected_scheme_version_id=expected_scheme_version_id, expected_scheme_version_id=expected_scheme_version_id,
) )
items = [item.model_dump() for item in payload.items] raw_json = await request.json()
items = []
for i, item in enumerate(payload.items):
item_raw = raw_json.get("items", [])[i] if "items" in raw_json else {}
items.append({k: item.model_dump(exclude_unset=True).get(k) for k in item_raw})
for item in items:
for field in ("seat_id", "sector_id", "group_id"):
if field in item and (item[field] is None or item[field] == ""):
from app.services.api_errors import raise_unprocessable
raise_unprocessable(
code="business_identifier_nullification_forbidden",
message=f"{field} cannot be nullified or explicitly cleared",
)
await validate_bulk_seat_patch_uniqueness( await validate_bulk_seat_patch_uniqueness(
scheme_version_id=version.scheme_version_id, scheme_version_id=version.scheme_version_id,
items=items, items=items,
@@ -625,6 +646,7 @@ async def bulk_patch_draft_seats(
@router.patch(f"{settings.api_v1_prefix}/schemes/{{scheme_id}}/draft/sectors/records/{{sector_record_id}}", response_model=SectorPatchResponse) @router.patch(f"{settings.api_v1_prefix}/schemes/{{scheme_id}}/draft/sectors/records/{{sector_record_id}}", response_model=SectorPatchResponse)
async def patch_draft_sector( async def patch_draft_sector(
request: Request,
scheme_id: str, scheme_id: str,
sector_record_id: str, sector_record_id: str,
payload: SectorPatchRequest, payload: SectorPatchRequest,
@@ -642,20 +664,28 @@ async def patch_draft_sector(
new_sector_id=payload.sector_id, new_sector_id=payload.sector_id,
) )
raw_json = await request.json()
update_data = {k: v for k, v in payload.model_dump(exclude_unset=True).items() if k in raw_json}
for field in ("sector_id",):
if field in update_data and (update_data[field] is None or update_data[field] == ""):
from app.services.api_errors import raise_unprocessable
raise_unprocessable(
code="business_identifier_nullification_forbidden",
message=f"{field} cannot be nullified or explicitly cleared",
)
row, old_sector_id = await update_scheme_version_sector_by_record_id( row, old_sector_id = await update_scheme_version_sector_by_record_id(
scheme_version_id=version.scheme_version_id, scheme_version_id=version.scheme_version_id,
sector_record_id=sector_record_id, sector_record_id=sector_record_id,
sector_id=payload.sector_id, **update_data,
name=payload.name,
)
cascaded_count = await cascade_update_seat_sector_reference(
scheme_version_id=version.scheme_version_id,
old_sector_id=old_sector_id,
new_sector_id=payload.sector_id,
)
repair_result = await repair_structure_references(
scheme_version_id=version.scheme_version_id,
) )
cascaded_count = 0
if "sector_id" in update_data and update_data["sector_id"] and update_data["sector_id"] != old_sector_id:
cascaded_count = await cascade_update_seat_sector_reference(
scheme_version_id=version.scheme_version_id,
old_sector_id=old_sector_id,
new_sector_id=update_data["sector_id"],
)
await create_audit_event( await create_audit_event(
scheme_id=scheme.scheme_id, scheme_id=scheme.scheme_id,
@@ -668,7 +698,6 @@ async def patch_draft_sector(
"new_sector_id": payload.sector_id, "new_sector_id": payload.sector_id,
"name": payload.name, "name": payload.name,
"cascaded_seats_count": cascaded_count, "cascaded_seats_count": cascaded_count,
"repair_result": repair_result,
}, },
) )
@@ -683,6 +712,7 @@ async def patch_draft_sector(
@router.patch(f"{settings.api_v1_prefix}/schemes/{{scheme_id}}/draft/groups/records/{{group_record_id}}", response_model=GroupPatchResponse) @router.patch(f"{settings.api_v1_prefix}/schemes/{{scheme_id}}/draft/groups/records/{{group_record_id}}", response_model=GroupPatchResponse)
async def patch_draft_group( async def patch_draft_group(
request: Request,
scheme_id: str, scheme_id: str,
group_record_id: str, group_record_id: str,
payload: GroupPatchRequest, payload: GroupPatchRequest,
@@ -700,20 +730,28 @@ async def patch_draft_group(
new_group_id=payload.group_id, new_group_id=payload.group_id,
) )
raw_json = await request.json()
update_data = {k: v for k, v in payload.model_dump(exclude_unset=True).items() if k in raw_json}
for field in ("group_id",):
if field in update_data and (update_data[field] is None or update_data[field] == ""):
from app.services.api_errors import raise_unprocessable
raise_unprocessable(
code="business_identifier_nullification_forbidden",
message=f"{field} cannot be nullified or explicitly cleared",
)
row, old_group_id = await update_scheme_version_group_by_record_id( row, old_group_id = await update_scheme_version_group_by_record_id(
scheme_version_id=version.scheme_version_id, scheme_version_id=version.scheme_version_id,
group_record_id=group_record_id, group_record_id=group_record_id,
group_id=payload.group_id, **update_data,
name=payload.name,
)
cascaded_count = await cascade_update_seat_group_reference(
scheme_version_id=version.scheme_version_id,
old_group_id=old_group_id,
new_group_id=payload.group_id,
)
repair_result = await repair_structure_references(
scheme_version_id=version.scheme_version_id,
) )
cascaded_count = 0
if "group_id" in update_data and update_data["group_id"] and update_data["group_id"] != old_group_id:
cascaded_count = await cascade_update_seat_group_reference(
scheme_version_id=version.scheme_version_id,
old_group_id=old_group_id,
new_group_id=update_data["group_id"],
)
await create_audit_event( await create_audit_event(
scheme_id=scheme.scheme_id, scheme_id=scheme.scheme_id,
@@ -726,7 +764,6 @@ async def patch_draft_group(
"new_group_id": payload.group_id, "new_group_id": payload.group_id,
"name": payload.name, "name": payload.name,
"cascaded_seats_count": cascaded_count, "cascaded_seats_count": cascaded_count,
"repair_result": repair_result,
}, },
) )

View File

@@ -2,12 +2,10 @@ from fastapi import APIRouter, Depends, Query
from app.core.config import settings from app.core.config import settings
from app.repositories.audit import create_audit_event from app.repositories.audit import create_audit_event
from app.repositories.scheme_groups import clone_scheme_version_groups
from app.repositories.scheme_seats import clone_scheme_version_seats
from app.repositories.scheme_sectors import clone_scheme_version_sectors
from app.repositories.scheme_versions import ( from app.repositories.scheme_versions import (
count_scheme_versions, count_scheme_versions,
create_next_scheme_version_from_current, create_next_scheme_version_from_current_checked,
ensure_draft_scheme_version_consistent,
get_current_scheme_version, get_current_scheme_version,
list_scheme_versions, list_scheme_versions,
) )
@@ -34,26 +32,12 @@ from app.schemas.scheme_versions import (
SchemeVersionListResponse, SchemeVersionListResponse,
) )
from app.security.auth import require_api_key from app.security.auth import require_api_key
from app.services.api_errors import raise_conflict
from app.services.publish_service import publish_current_draft_scheme from app.services.publish_service import publish_current_draft_scheme
from app.services.scheme_validation import build_scheme_validation_report from app.services.scheme_validation import build_scheme_validation_report
router = APIRouter() router = APIRouter()
def _build_stale_current_version_detail(
*,
expected_scheme_version_id: str,
actual_scheme_version_id: str,
) -> dict:
return {
"code": "stale_current_version",
"message": "Current scheme version changed. Reload scheme state before creating a new version.",
"expected_scheme_version_id": expected_scheme_version_id,
"actual_scheme_version_id": actual_scheme_version_id,
}
@router.get(f"{settings.api_v1_prefix}/schemes", response_model=SchemeListResponse) @router.get(f"{settings.api_v1_prefix}/schemes", response_model=SchemeListResponse)
async def get_schemes( async def get_schemes(
limit: int = Query(default=50, ge=1, le=200), limit: int = Query(default=50, ge=1, le=200),
@@ -155,36 +139,9 @@ async def create_next_scheme_version_endpoint(
expected_current_scheme_version_id: str | None = Query(default=None), expected_current_scheme_version_id: str | None = Query(default=None),
role: str = Depends(require_api_key), role: str = Depends(require_api_key),
): ):
current_scheme = await get_scheme_record_by_scheme_id(scheme_id) current_version, new_version = await create_next_scheme_version_from_current_checked(
current_version = await get_current_scheme_version( scheme_id=scheme_id,
scheme_id=current_scheme.scheme_id, expected_current_scheme_version_id=expected_current_scheme_version_id,
current_version_number=current_scheme.current_version_number,
)
if (
expected_current_scheme_version_id
and expected_current_scheme_version_id != current_version.scheme_version_id
):
raise_conflict(
_build_stale_current_version_detail(
expected_scheme_version_id=expected_current_scheme_version_id,
actual_scheme_version_id=current_version.scheme_version_id,
)
)
new_version = await create_next_scheme_version_from_current(scheme_id)
await clone_scheme_version_sectors(
source_scheme_version_id=current_version.scheme_version_id,
target_scheme_version_id=new_version.scheme_version_id,
)
await clone_scheme_version_groups(
source_scheme_version_id=current_version.scheme_version_id,
target_scheme_version_id=new_version.scheme_version_id,
)
await clone_scheme_version_seats(
source_scheme_version_id=current_version.scheme_version_id,
target_scheme_version_id=new_version.scheme_version_id,
) )
await create_audit_event( await create_audit_event(
@@ -214,26 +171,14 @@ async def ensure_draft_scheme_version(
expected_current_scheme_version_id: str | None = Query(default=None), expected_current_scheme_version_id: str | None = Query(default=None),
role: str = Depends(require_api_key), role: str = Depends(require_api_key),
): ):
scheme = await get_scheme_record_by_scheme_id(scheme_id) current_version, created, source_scheme_version_id = await ensure_draft_scheme_version_consistent(
current_version = await get_current_scheme_version( scheme_id=scheme_id,
scheme_id=scheme.scheme_id, expected_current_scheme_version_id=expected_current_scheme_version_id,
current_version_number=scheme.current_version_number,
) )
if ( if not created:
expected_current_scheme_version_id
and expected_current_scheme_version_id != current_version.scheme_version_id
):
raise_conflict(
_build_stale_current_version_detail(
expected_scheme_version_id=expected_current_scheme_version_id,
actual_scheme_version_id=current_version.scheme_version_id,
)
)
if scheme.status == "draft" and current_version.status == "draft":
return EnsureDraftResponse( return EnsureDraftResponse(
scheme_id=scheme.scheme_id, scheme_id=current_version.scheme_id,
scheme_version_id=current_version.scheme_version_id, scheme_version_id=current_version.scheme_version_id,
version_number=current_version.version_number, version_number=current_version.version_number,
status=current_version.status, status=current_version.status,
@@ -242,42 +187,27 @@ async def ensure_draft_scheme_version(
source_scheme_version_id=None, source_scheme_version_id=None,
) )
new_version = await create_next_scheme_version_from_current(scheme_id)
await clone_scheme_version_sectors(
source_scheme_version_id=current_version.scheme_version_id,
target_scheme_version_id=new_version.scheme_version_id,
)
await clone_scheme_version_groups(
source_scheme_version_id=current_version.scheme_version_id,
target_scheme_version_id=new_version.scheme_version_id,
)
await clone_scheme_version_seats(
source_scheme_version_id=current_version.scheme_version_id,
target_scheme_version_id=new_version.scheme_version_id,
)
await create_audit_event( await create_audit_event(
scheme_id=scheme_id, scheme_id=scheme_id,
event_type="scheme.version.created", event_type="scheme.version.created",
object_type="scheme_version", object_type="scheme_version",
object_ref=new_version.scheme_version_id, object_ref=current_version.scheme_version_id,
details={ details={
"source_scheme_version_id": current_version.scheme_version_id, "source_scheme_version_id": source_scheme_version_id,
"version_number": new_version.version_number, "version_number": current_version.version_number,
"normalized_storage_path": new_version.normalized_storage_path, "normalized_storage_path": current_version.normalized_storage_path,
"reason": "ensure_draft", "reason": "ensure_draft",
}, },
) )
return EnsureDraftResponse( return EnsureDraftResponse(
scheme_id=new_version.scheme_id, scheme_id=current_version.scheme_id,
scheme_version_id=new_version.scheme_version_id, scheme_version_id=current_version.scheme_version_id,
version_number=new_version.version_number, version_number=current_version.version_number,
status=new_version.status, status=current_version.status,
normalized_storage_path=new_version.normalized_storage_path, normalized_storage_path=current_version.normalized_storage_path,
created=True, created=True,
source_scheme_version_id=current_version.scheme_version_id, source_scheme_version_id=source_scheme_version_id,
) )

View File

@@ -10,8 +10,7 @@ from app.repositories.scheme_artifacts import create_scheme_artifact
from app.repositories.scheme_groups import replace_scheme_version_groups from app.repositories.scheme_groups import replace_scheme_version_groups
from app.repositories.scheme_seats import replace_scheme_version_seats from app.repositories.scheme_seats import replace_scheme_version_seats
from app.repositories.scheme_sectors import replace_scheme_version_sectors from app.repositories.scheme_sectors import replace_scheme_version_sectors
from app.repositories.scheme_versions import create_initial_scheme_version from app.repositories.schemes import create_scheme_from_upload_with_initial_version
from app.repositories.schemes import create_scheme_from_upload
from app.repositories.uploads import ( from app.repositories.uploads import (
count_upload_records, count_upload_records,
create_upload_record, create_upload_record,
@@ -202,17 +201,9 @@ async def upload_scheme_svg(
processing_status="completed", processing_status="completed",
) )
scheme_id = await create_scheme_from_upload( scheme_id, scheme_version_id = await create_scheme_from_upload_with_initial_version(
source_upload_id=upload_id, source_upload_id=upload_id,
name=Path(filename).stem or filename, name=Path(filename).stem or filename,
normalized_elements_count=summary["elements_count"],
normalized_seats_count=summary["seats_count"],
normalized_groups_count=summary["groups_count"],
normalized_sectors_count=summary["sectors_count"],
)
scheme_version_id = await create_initial_scheme_version(
scheme_id=scheme_id,
normalized_storage_path=normalized_storage_path, normalized_storage_path=normalized_storage_path,
normalized_elements_count=summary["elements_count"], normalized_elements_count=summary["elements_count"],
normalized_seats_count=summary["seats_count"], normalized_seats_count=summary["seats_count"],

View File

@@ -1,29 +1,32 @@
from pydantic import Field, model_validator
from pydantic_settings import BaseSettings, SettingsConfigDict from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings): class Settings(BaseSettings):
app_name: str = "svg-service" app_name: str = Field(..., validation_alias="APP_NAME")
app_env: str = "development" app_env: str = Field(..., validation_alias="APP_ENV")
app_port: int = 9020 app_port: int = Field(..., validation_alias="BACKEND_PORT")
api_v1_prefix: str = "/api/v1" api_v1_prefix: str = Field(..., validation_alias="API_V1_PREFIX")
auth_header_name: str = "X-API-Key" auth_header_name: str = Field(..., validation_alias="AUTH_HEADER_NAME")
admin_api_key: str = "admin-local-dev-key" api_keys_admin: str = Field(..., validation_alias="API_KEYS_ADMIN")
viewer_api_key: str = "viewer-local-dev-key" api_keys_operator: str = Field(..., validation_alias="API_KEYS_OPERATOR")
api_keys_viewer: str = Field(..., validation_alias="API_KEYS_VIEWER")
postgres_host: str = "postgres" postgres_host: str = Field(..., validation_alias="POSTGRES_HOST")
postgres_port: int = 5432 postgres_port: int = Field(..., validation_alias="POSTGRES_PORT")
postgres_db: str = "svg_service" postgres_db: str = Field(..., validation_alias="POSTGRES_DB")
postgres_user: str = "svg_service" postgres_user: str = Field(..., validation_alias="POSTGRES_USER")
postgres_password: str = "svg_service_dev_password" postgres_password: str = Field(..., validation_alias="POSTGRES_PASSWORD")
database_url_raw: str | None = Field(default=None, validation_alias="DATABASE_URL")
svg_max_file_size_bytes: int = 10 * 1024 * 1024 svg_max_file_size_bytes: int = Field(10 * 1024 * 1024, validation_alias="SVG_MAX_FILE_SIZE_BYTES")
svg_max_elements: int = 25000 svg_max_elements: int = Field(25000, validation_alias="SVG_MAX_ELEMENTS")
svg_allow_internal_use_references_only: bool = True svg_allow_internal_use_references_only: bool = Field(True, validation_alias="SVG_ALLOW_INTERNAL_USE_REFERENCES_ONLY")
svg_forbid_foreign_object_v1: bool = True svg_forbid_foreign_object_v1: bool = Field(True, validation_alias="SVG_FORBID_FOREIGN_OBJECT_V1")
svg_forbid_style_v1: bool = False svg_forbid_style_v1: bool = Field(False, validation_alias="SVG_FORBID_STYLE_V1")
svg_forbid_image_v1: bool = True svg_forbid_image_v1: bool = Field(True, validation_alias="SVG_FORBID_IMAGE_V1")
svg_display_enabled: bool = True svg_display_enabled: bool = True
svg_display_mode: str = "passthrough" svg_display_mode: str = "passthrough"
@@ -34,7 +37,7 @@ class Settings(BaseSettings):
svg_display_force_viewbox: bool = True svg_display_force_viewbox: bool = True
svg_display_technical_text_patterns: str = "debug,tech,helper,tmp,service" svg_display_technical_text_patterns: str = "debug,tech,helper,tmp,service"
storage_root_dir: str = "/data" storage_root_dir: str = Field(..., validation_alias="STORAGE_ROOT")
publish_preview_retention_per_variant: int = 2 publish_preview_retention_per_variant: int = 2
publish_require_full_pricing_coverage: bool = False publish_require_full_pricing_coverage: bool = False
@@ -45,16 +48,32 @@ class Settings(BaseSettings):
extra="ignore", extra="ignore",
) )
    @model_validator(mode="after")
    def validate_database_config(self) -> "Settings":
        """Reject a DATABASE_URL override that disagrees with the discrete settings.

        Guards against configuration drift: if DATABASE_URL is supplied
        explicitly, it must equal the URL assembled from the POSTGRES_*
        settings, otherwise the two sources of truth could silently diverge.
        """
        # Assemble the canonical asyncpg URL from the individual fields.
        assembled_database_url = (
            f"postgresql+asyncpg://{self.postgres_user}:{self.postgres_password}"
            f"@{self.postgres_host}:{self.postgres_port}/{self.postgres_db}"
        )
        # Truthy check: an unset (None) or empty override is tolerated; only a
        # non-empty, mismatching DATABASE_URL is rejected.
        if self.database_url_raw and self.database_url_raw != assembled_database_url:
            raise ValueError("DATABASE_URL must match POSTGRES_HOST/PORT/DB/USER/PASSWORD")
        return self
@property @property
def admin_keys(self) -> set[str]: def admin_keys(self) -> set[str]:
return {item.strip() for item in self.admin_api_key.split(",") if item.strip()} return {item.strip() for item in self.api_keys_admin.split(",") if item.strip()}
@property
def operator_keys(self) -> set[str]:
return {item.strip() for item in self.api_keys_operator.split(",") if item.strip()}
@property @property
def viewer_keys(self) -> set[str]: def viewer_keys(self) -> set[str]:
return {item.strip() for item in self.viewer_api_key.split(",") if item.strip()} return {item.strip() for item in self.api_keys_viewer.split(",") if item.strip()}
@property @property
def database_url(self) -> str: def database_url(self) -> str:
if self.database_url_raw:
return self.database_url_raw
return ( return (
f"postgresql+asyncpg://{self.postgres_user}:{self.postgres_password}" f"postgresql+asyncpg://{self.postgres_user}:{self.postgres_password}"
f"@{self.postgres_host}:{self.postgres_port}/{self.postgres_db}" f"@{self.postgres_host}:{self.postgres_port}/{self.postgres_db}"

View File

@@ -8,6 +8,49 @@ from app.models.scheme_group import SchemeGroupRecord
from app.models.scheme_seat import SchemeSeatRecord from app.models.scheme_seat import SchemeSeatRecord
def _conflict(message: str) -> HTTPException:
    """Build the uniqueness-violation error raised for duplicate group identifiers.

    NOTE(review): despite the name, this produces HTTP 422 (unprocessable
    entity), not 409 Conflict — confirm the name/status pairing is intentional.
    """
    payload = {
        "code": "group_uniqueness_violation",
        "message": message,
    }
    return HTTPException(
        status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
        detail=payload,
    )
async def _ensure_group_uniqueness(
    *,
    session,
    scheme_version_id: str,
    group_id: str | None,
    element_id: str | None,
    exclude_group_record_id: str | None = None,
) -> None:
    """Raise a 422 error if ``group_id`` or ``element_id`` is already taken
    within the given draft scheme version.

    Args:
        session: active async SQLAlchemy session (caller owns the transaction).
        scheme_version_id: draft version to check within.
        group_id: candidate business identifier; skipped when falsy.
        element_id: candidate SVG element identifier; skipped when falsy.
        exclude_group_record_id: record to ignore (the one being updated),
            so a record does not collide with itself.

    Raises:
        HTTPException: 422 with code ``group_uniqueness_violation``.
    """
    # Both identifiers follow the same lookup pattern, so drive them from a
    # single table instead of duplicating the query logic per field.
    checks = (
        ("group_id", group_id, SchemeGroupRecord.group_id),
        ("element_id", element_id, SchemeGroupRecord.element_id),
    )
    for label, value, column in checks:
        if not value:
            continue
        stmt = select(SchemeGroupRecord).where(
            SchemeGroupRecord.scheme_version_id == scheme_version_id,
            column == value,
        )
        if exclude_group_record_id:
            stmt = stmt.where(SchemeGroupRecord.group_record_id != exclude_group_record_id)
        existing = (await session.execute(stmt)).scalar_one_or_none()
        if existing is not None:
            raise _conflict(f"Group with {label}='{value}' already exists in current draft version")
async def replace_scheme_version_groups( async def replace_scheme_version_groups(
*, *,
scheme_id: str, scheme_id: str,
@@ -23,13 +66,29 @@ async def replace_scheme_version_groups(
for row in existing_rows: for row in existing_rows:
await session.delete(row) await session.delete(row)
seen_group_ids: set[str] = set()
seen_element_ids: set[str] = set()
for item in groups: for item in groups:
group_id = item.get("group_id")
element_id = item.get("id")
if group_id:
if group_id in seen_group_ids:
raise _conflict(f"Duplicate group_id='{group_id}' in replacement payload")
seen_group_ids.add(group_id)
if element_id:
if element_id in seen_element_ids:
raise _conflict(f"Duplicate element_id='{element_id}' in replacement payload")
seen_element_ids.add(element_id)
row = SchemeGroupRecord( row = SchemeGroupRecord(
group_record_id=item["group_record_id"] if "group_record_id" in item and item["group_record_id"] else uuid4().hex, group_record_id=item["group_record_id"] if "group_record_id" in item and item["group_record_id"] else uuid4().hex,
scheme_id=scheme_id, scheme_id=scheme_id,
scheme_version_id=scheme_version_id, scheme_version_id=scheme_version_id,
element_id=item.get("id"), element_id=element_id,
group_id=item.get("group_id"), group_id=group_id,
name=item.get("group_id"), name=item.get("group_id"),
classes_raw=str(item.get("classes")), classes_raw=str(item.get("classes")),
) )
@@ -44,26 +103,51 @@ async def clone_scheme_version_groups(
target_scheme_version_id: str, target_scheme_version_id: str,
) -> None: ) -> None:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
result = await session.execute( await clone_scheme_version_groups_in_session(
select(SchemeGroupRecord).where(SchemeGroupRecord.scheme_version_id == source_scheme_version_id) session=session,
source_scheme_version_id=source_scheme_version_id,
target_scheme_version_id=target_scheme_version_id,
) )
rows = list(result.scalars().all())
for row in rows:
cloned = SchemeGroupRecord(
group_record_id=uuid4().hex,
scheme_id=row.scheme_id,
scheme_version_id=target_scheme_version_id,
element_id=row.element_id,
group_id=row.group_id,
name=row.name,
classes_raw=row.classes_raw,
)
session.add(cloned)
await session.commit() await session.commit()
async def clone_scheme_version_groups_in_session(
    *,
    session,
    source_scheme_version_id: str,
    target_scheme_version_id: str,
) -> None:
    """Copy every group record from the source version into the target version.

    Runs inside the caller-provided session so the clone participates in the
    caller's transaction; no commit is performed here. Duplicate business
    identifiers in the source data abort the clone with a 422 error.
    """
    source_rows = (
        await session.execute(
            select(SchemeGroupRecord).where(
                SchemeGroupRecord.scheme_version_id == source_scheme_version_id
            )
        )
    ).scalars().all()

    used_group_ids: set[str] = set()
    used_element_ids: set[str] = set()
    for source in source_rows:
        # Reject duplicated identifiers before staging the copy of this row.
        if source.group_id:
            if source.group_id in used_group_ids:
                raise _conflict(f"Duplicate group_id='{source.group_id}' while cloning draft")
            used_group_ids.add(source.group_id)
        if source.element_id:
            if source.element_id in used_element_ids:
                raise _conflict(f"Duplicate element_id='{source.element_id}' while cloning draft")
            used_element_ids.add(source.element_id)
        session.add(
            SchemeGroupRecord(
                group_record_id=uuid4().hex,
                scheme_id=source.scheme_id,
                scheme_version_id=target_scheme_version_id,
                element_id=source.element_id,
                group_id=source.group_id,
                name=source.name,
                classes_raw=source.classes_raw,
            )
        )
async def list_scheme_version_groups(scheme_version_id: str) -> list[SchemeGroupRecord]: async def list_scheme_version_groups(scheme_version_id: str) -> list[SchemeGroupRecord]:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
result = await session.execute( result = await session.execute(
@@ -78,8 +162,7 @@ async def update_scheme_version_group_by_record_id(
*, *,
scheme_version_id: str, scheme_version_id: str,
group_record_id: str, group_record_id: str,
group_id: str | None, **update_data,
name: str | None,
) -> tuple[SchemeGroupRecord, str | None]: ) -> tuple[SchemeGroupRecord, str | None]:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
result = await session.execute( result = await session.execute(
@@ -96,9 +179,20 @@ async def update_scheme_version_group_by_record_id(
detail="Group record not found in current draft version", detail="Group record not found in current draft version",
) )
if "group_id" in update_data:
await _ensure_group_uniqueness(
session=session,
scheme_version_id=scheme_version_id,
group_id=update_data["group_id"],
element_id=row.element_id,
exclude_group_record_id=group_record_id,
)
old_group_id = row.group_id old_group_id = row.group_id
row.group_id = group_id if "group_id" in update_data:
row.name = name row.group_id = update_data["group_id"]
if "name" in update_data:
row.name = update_data["name"]
await session.commit() await session.commit()
await session.refresh(row) await session.refresh(row)
@@ -115,6 +209,13 @@ async def create_scheme_version_group(
classes_raw: str | None, classes_raw: str | None,
) -> SchemeGroupRecord: ) -> SchemeGroupRecord:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
await _ensure_group_uniqueness(
session=session,
scheme_version_id=scheme_version_id,
group_id=group_id,
element_id=element_id,
)
row = SchemeGroupRecord( row = SchemeGroupRecord(
group_record_id=uuid4().hex, group_record_id=uuid4().hex,
scheme_id=scheme_id, scheme_id=scheme_id,

View File

@@ -51,36 +51,48 @@ async def clone_scheme_version_seats(
target_scheme_version_id: str, target_scheme_version_id: str,
) -> None: ) -> None:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
result = await session.execute( await clone_scheme_version_seats_in_session(
select(SchemeSeatRecord).where(SchemeSeatRecord.scheme_version_id == source_scheme_version_id) session=session,
source_scheme_version_id=source_scheme_version_id,
target_scheme_version_id=target_scheme_version_id,
) )
rows = list(result.scalars().all())
for row in rows:
cloned = SchemeSeatRecord(
seat_record_id=__import__("uuid").uuid4().hex,
scheme_id=row.scheme_id,
scheme_version_id=target_scheme_version_id,
element_id=row.element_id,
seat_id=row.seat_id,
sector_id=row.sector_id,
group_id=row.group_id,
row_label=row.row_label,
seat_number=row.seat_number,
tag=row.tag,
classes_raw=row.classes_raw,
x=row.x,
y=row.y,
cx=row.cx,
cy=row.cy,
width=row.width,
height=row.height,
)
session.add(cloned)
await session.commit() await session.commit()
async def clone_scheme_version_seats_in_session(
    *,
    session,
    source_scheme_version_id: str,
    target_scheme_version_id: str,
) -> None:
    """Copy every seat record from the source version into the target version.

    Runs inside the caller-provided session so the clone participates in the
    caller's transaction; no commit is performed here. Each clone gets a fresh
    ``seat_record_id``; all other columns are carried over unchanged.
    """
    # Import once at function scope instead of the original per-row
    # __import__("uuid") hack, which re-resolved the module on every seat.
    from uuid import uuid4

    result = await session.execute(
        select(SchemeSeatRecord).where(SchemeSeatRecord.scheme_version_id == source_scheme_version_id)
    )
    rows = list(result.scalars().all())
    for row in rows:
        cloned = SchemeSeatRecord(
            seat_record_id=uuid4().hex,
            scheme_id=row.scheme_id,
            scheme_version_id=target_scheme_version_id,
            element_id=row.element_id,
            seat_id=row.seat_id,
            sector_id=row.sector_id,
            group_id=row.group_id,
            row_label=row.row_label,
            seat_number=row.seat_number,
            tag=row.tag,
            classes_raw=row.classes_raw,
            x=row.x,
            y=row.y,
            cx=row.cx,
            cy=row.cy,
            width=row.width,
            height=row.height,
        )
        session.add(cloned)
async def list_scheme_version_seats(scheme_version_id: str) -> list[SchemeSeatRecord]: async def list_scheme_version_seats(scheme_version_id: str) -> list[SchemeSeatRecord]:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
result = await session.execute( result = await session.execute(
@@ -141,11 +153,7 @@ async def update_scheme_version_seat_by_record_id(
*, *,
scheme_version_id: str, scheme_version_id: str,
seat_record_id: str, seat_record_id: str,
seat_id: str | None, **update_data,
sector_id: str | None,
group_id: str | None,
row_label: str | None,
seat_number: str | None,
) -> SchemeSeatRecord: ) -> SchemeSeatRecord:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
result = await session.execute( result = await session.execute(
@@ -162,11 +170,16 @@ async def update_scheme_version_seat_by_record_id(
detail="Seat record not found in current draft version", detail="Seat record not found in current draft version",
) )
row.seat_id = seat_id if "seat_id" in update_data:
row.sector_id = sector_id row.seat_id = update_data["seat_id"]
row.group_id = group_id if "sector_id" in update_data:
row.row_label = row_label row.sector_id = update_data["sector_id"]
row.seat_number = seat_number if "group_id" in update_data:
row.group_id = update_data["group_id"]
if "row_label" in update_data:
row.row_label = update_data["row_label"]
if "seat_number" in update_data:
row.seat_number = update_data["seat_number"]
await session.commit() await session.commit()
await session.refresh(row) await session.refresh(row)
@@ -196,11 +209,16 @@ async def bulk_update_scheme_version_seats_by_record_id(
detail=f"Seat record not found in current draft version: {item['seat_record_id']}", detail=f"Seat record not found in current draft version: {item['seat_record_id']}",
) )
row.seat_id = item.get("seat_id") if "seat_id" in item:
row.sector_id = item.get("sector_id") row.seat_id = item["seat_id"]
row.group_id = item.get("group_id") if "sector_id" in item:
row.row_label = item.get("row_label") row.sector_id = item["sector_id"]
row.seat_number = item.get("seat_number") if "group_id" in item:
row.group_id = item["group_id"]
if "row_label" in item:
row.row_label = item["row_label"]
if "seat_number" in item:
row.seat_number = item["seat_number"]
updated_rows.append(row) updated_rows.append(row)
await session.commit() await session.commit()

View File

@@ -8,6 +8,49 @@ from app.models.scheme_sector import SchemeSectorRecord
from app.models.scheme_seat import SchemeSeatRecord from app.models.scheme_seat import SchemeSeatRecord
def _conflict(message: str) -> HTTPException:
return HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail={
"code": "sector_uniqueness_violation",
"message": message,
},
)
async def _ensure_sector_uniqueness(
*,
session,
scheme_version_id: str,
sector_id: str | None,
element_id: str | None,
exclude_sector_record_id: str | None = None,
) -> None:
if sector_id:
stmt = select(SchemeSectorRecord).where(
SchemeSectorRecord.scheme_version_id == scheme_version_id,
SchemeSectorRecord.sector_id == sector_id,
)
if exclude_sector_record_id:
stmt = stmt.where(SchemeSectorRecord.sector_record_id != exclude_sector_record_id)
existing = (await session.execute(stmt)).scalar_one_or_none()
if existing is not None:
raise _conflict(f"Sector with sector_id='{sector_id}' already exists in current draft version")
if element_id:
stmt = select(SchemeSectorRecord).where(
SchemeSectorRecord.scheme_version_id == scheme_version_id,
SchemeSectorRecord.element_id == element_id,
)
if exclude_sector_record_id:
stmt = stmt.where(SchemeSectorRecord.sector_record_id != exclude_sector_record_id)
existing = (await session.execute(stmt)).scalar_one_or_none()
if existing is not None:
raise _conflict(f"Sector with element_id='{element_id}' already exists in current draft version")
async def replace_scheme_version_sectors( async def replace_scheme_version_sectors(
*, *,
scheme_id: str, scheme_id: str,
@@ -23,13 +66,29 @@ async def replace_scheme_version_sectors(
for row in existing_rows: for row in existing_rows:
await session.delete(row) await session.delete(row)
seen_sector_ids: set[str] = set()
seen_element_ids: set[str] = set()
for item in sectors: for item in sectors:
sector_id = item.get("sector_id")
element_id = item.get("id")
if sector_id:
if sector_id in seen_sector_ids:
raise _conflict(f"Duplicate sector_id='{sector_id}' in replacement payload")
seen_sector_ids.add(sector_id)
if element_id:
if element_id in seen_element_ids:
raise _conflict(f"Duplicate element_id='{element_id}' in replacement payload")
seen_element_ids.add(element_id)
row = SchemeSectorRecord( row = SchemeSectorRecord(
sector_record_id=item["sector_record_id"] if "sector_record_id" in item and item["sector_record_id"] else uuid4().hex, sector_record_id=item["sector_record_id"] if "sector_record_id" in item and item["sector_record_id"] else uuid4().hex,
scheme_id=scheme_id, scheme_id=scheme_id,
scheme_version_id=scheme_version_id, scheme_version_id=scheme_version_id,
element_id=item.get("id"), element_id=element_id,
sector_id=item.get("sector_id"), sector_id=sector_id,
name=item.get("sector_id"), name=item.get("sector_id"),
classes_raw=str(item.get("classes")), classes_raw=str(item.get("classes")),
) )
@@ -44,26 +103,51 @@ async def clone_scheme_version_sectors(
target_scheme_version_id: str, target_scheme_version_id: str,
) -> None: ) -> None:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
result = await session.execute( await clone_scheme_version_sectors_in_session(
select(SchemeSectorRecord).where(SchemeSectorRecord.scheme_version_id == source_scheme_version_id) session=session,
source_scheme_version_id=source_scheme_version_id,
target_scheme_version_id=target_scheme_version_id,
) )
rows = list(result.scalars().all())
for row in rows:
cloned = SchemeSectorRecord(
sector_record_id=uuid4().hex,
scheme_id=row.scheme_id,
scheme_version_id=target_scheme_version_id,
element_id=row.element_id,
sector_id=row.sector_id,
name=row.name,
classes_raw=row.classes_raw,
)
session.add(cloned)
await session.commit() await session.commit()
async def clone_scheme_version_sectors_in_session(
*,
session,
source_scheme_version_id: str,
target_scheme_version_id: str,
) -> None:
result = await session.execute(
select(SchemeSectorRecord).where(SchemeSectorRecord.scheme_version_id == source_scheme_version_id)
)
rows = list(result.scalars().all())
seen_sector_ids: set[str] = set()
seen_element_ids: set[str] = set()
for row in rows:
if row.sector_id:
if row.sector_id in seen_sector_ids:
raise _conflict(f"Duplicate sector_id='{row.sector_id}' while cloning draft")
seen_sector_ids.add(row.sector_id)
if row.element_id:
if row.element_id in seen_element_ids:
raise _conflict(f"Duplicate element_id='{row.element_id}' while cloning draft")
seen_element_ids.add(row.element_id)
cloned = SchemeSectorRecord(
sector_record_id=uuid4().hex,
scheme_id=row.scheme_id,
scheme_version_id=target_scheme_version_id,
element_id=row.element_id,
sector_id=row.sector_id,
name=row.name,
classes_raw=row.classes_raw,
)
session.add(cloned)
async def list_scheme_version_sectors(scheme_version_id: str) -> list[SchemeSectorRecord]: async def list_scheme_version_sectors(scheme_version_id: str) -> list[SchemeSectorRecord]:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
result = await session.execute( result = await session.execute(
@@ -78,8 +162,7 @@ async def update_scheme_version_sector_by_record_id(
*, *,
scheme_version_id: str, scheme_version_id: str,
sector_record_id: str, sector_record_id: str,
sector_id: str | None, **update_data,
name: str | None,
) -> tuple[SchemeSectorRecord, str | None]: ) -> tuple[SchemeSectorRecord, str | None]:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
result = await session.execute( result = await session.execute(
@@ -96,9 +179,20 @@ async def update_scheme_version_sector_by_record_id(
detail="Sector record not found in current draft version", detail="Sector record not found in current draft version",
) )
if "sector_id" in update_data:
await _ensure_sector_uniqueness(
session=session,
scheme_version_id=scheme_version_id,
sector_id=update_data["sector_id"],
element_id=row.element_id,
exclude_sector_record_id=sector_record_id,
)
old_sector_id = row.sector_id old_sector_id = row.sector_id
row.sector_id = sector_id if "sector_id" in update_data:
row.name = name row.sector_id = update_data["sector_id"]
if "name" in update_data:
row.name = update_data["name"]
await session.commit() await session.commit()
await session.refresh(row) await session.refresh(row)
@@ -115,6 +209,13 @@ async def create_scheme_version_sector(
classes_raw: str | None, classes_raw: str | None,
) -> SchemeSectorRecord: ) -> SchemeSectorRecord:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
await _ensure_sector_uniqueness(
session=session,
scheme_version_id=scheme_version_id,
sector_id=sector_id,
element_id=element_id,
)
row = SchemeSectorRecord( row = SchemeSectorRecord(
sector_record_id=uuid4().hex, sector_record_id=uuid4().hex,
scheme_id=scheme_id, scheme_id=scheme_id,

View File

@@ -7,6 +7,125 @@ from sqlalchemy import asc, desc, func, select
from app.db.session import AsyncSessionLocal from app.db.session import AsyncSessionLocal
from app.models.scheme import SchemeRecord from app.models.scheme import SchemeRecord
from app.models.scheme_version import SchemeVersionRecord from app.models.scheme_version import SchemeVersionRecord
from app.repositories.scheme_groups import clone_scheme_version_groups_in_session
from app.repositories.scheme_seats import clone_scheme_version_seats_in_session
from app.repositories.scheme_sectors import clone_scheme_version_sectors_in_session
from app.services.api_errors import raise_conflict
def _raise_current_version_inconsistent(*, scheme_id: str, current_version_number: int) -> None:
raise_conflict(
code="current_version_inconsistent",
message="Scheme current version pointer is inconsistent with scheme_versions state.",
details={
"scheme_id": scheme_id,
"current_version_number": current_version_number,
},
)
def _raise_stale_current_version(*, expected_scheme_version_id: str, actual_scheme_version_id: str) -> None:
raise_conflict(
code="stale_current_version",
message="Current scheme version changed. Reload scheme state before creating a new version.",
details={
"expected_scheme_version_id": expected_scheme_version_id,
"actual_scheme_version_id": actual_scheme_version_id,
},
)
async def _get_scheme_for_update(session, scheme_id: str) -> SchemeRecord:
scheme_result = await session.execute(
select(SchemeRecord)
.where(SchemeRecord.scheme_id == scheme_id)
.with_for_update()
)
scheme = scheme_result.scalar_one_or_none()
if scheme is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Scheme not found",
)
return scheme
async def _get_current_scheme_version_for_update(
session,
*,
scheme_id: str,
current_version_number: int,
) -> SchemeVersionRecord:
current_result = await session.execute(
select(SchemeVersionRecord)
.where(
SchemeVersionRecord.scheme_id == scheme_id,
SchemeVersionRecord.version_number == current_version_number,
)
.with_for_update()
)
current_version = current_result.scalar_one_or_none()
if current_version is None:
_raise_current_version_inconsistent(
scheme_id=scheme_id,
current_version_number=current_version_number,
)
return current_version
async def _build_next_draft_version(
session,
*,
scheme: SchemeRecord,
source_version: SchemeVersionRecord,
) -> SchemeVersionRecord:
max_version_result = await session.execute(
select(func.coalesce(func.max(SchemeVersionRecord.version_number), 0)).where(
SchemeVersionRecord.scheme_id == scheme.scheme_id
)
)
next_version_number = int(max_version_result.scalar_one()) + 1
new_version = SchemeVersionRecord(
scheme_version_id=uuid4().hex,
scheme_id=scheme.scheme_id,
version_number=next_version_number,
status="draft",
normalized_storage_path=source_version.normalized_storage_path,
normalized_elements_count=source_version.normalized_elements_count,
normalized_seats_count=source_version.normalized_seats_count,
normalized_groups_count=source_version.normalized_groups_count,
normalized_sectors_count=source_version.normalized_sectors_count,
display_svg_storage_path=source_version.display_svg_storage_path,
display_svg_status=source_version.display_svg_status,
display_svg_generated_at=source_version.display_svg_generated_at,
)
session.add(new_version)
await session.flush()
await clone_scheme_version_sectors_in_session(
session=session,
source_scheme_version_id=source_version.scheme_version_id,
target_scheme_version_id=new_version.scheme_version_id,
)
await clone_scheme_version_groups_in_session(
session=session,
source_scheme_version_id=source_version.scheme_version_id,
target_scheme_version_id=new_version.scheme_version_id,
)
await clone_scheme_version_seats_in_session(
session=session,
source_scheme_version_id=source_version.scheme_version_id,
target_scheme_version_id=new_version.scheme_version_id,
)
scheme.current_version_number = new_version.version_number
scheme.status = "draft"
scheme.published_at = None
scheme.normalized_elements_count = source_version.normalized_elements_count
scheme.normalized_seats_count = source_version.normalized_seats_count
scheme.normalized_groups_count = source_version.normalized_groups_count
scheme.normalized_sectors_count = source_version.normalized_sectors_count
return new_version
async def create_initial_scheme_version( async def create_initial_scheme_version(
@@ -75,9 +194,9 @@ async def get_current_scheme_version(scheme_id: str, current_version_number: int
row = result.scalar_one_or_none() row = result.scalar_one_or_none()
if row is None: if row is None:
raise HTTPException( _raise_current_version_inconsistent(
status_code=status.HTTP_404_NOT_FOUND, scheme_id=scheme_id,
detail="Current scheme version not found", current_version_number=current_version_number,
) )
return row return row
@@ -113,57 +232,87 @@ async def update_scheme_version_display_artifact(
async def create_next_scheme_version_from_current(scheme_id: str) -> SchemeVersionRecord: async def create_next_scheme_version_from_current(scheme_id: str) -> SchemeVersionRecord:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
scheme_result = await session.execute( async with session.begin():
select(SchemeRecord).where(SchemeRecord.scheme_id == scheme_id) scheme = await _get_scheme_for_update(session, scheme_id)
) current_version = await _get_current_scheme_version_for_update(
scheme = scheme_result.scalar_one_or_none() session,
scheme_id=scheme.scheme_id,
if scheme is None: current_version_number=scheme.current_version_number,
raise HTTPException( )
status_code=status.HTTP_404_NOT_FOUND, new_version = await _build_next_draft_version(
detail="Scheme not found", session,
scheme=scheme,
source_version=current_version,
) )
current_result = await session.execute(
select(SchemeVersionRecord).where(
SchemeVersionRecord.scheme_id == scheme.scheme_id,
SchemeVersionRecord.version_number == scheme.current_version_number,
)
)
current_version = current_result.scalar_one_or_none()
if current_version is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Current scheme version not found",
)
next_version_number = current_version.version_number + 1
new_version = SchemeVersionRecord(
scheme_version_id=uuid4().hex,
scheme_id=scheme.scheme_id,
version_number=next_version_number,
status="draft",
normalized_storage_path=current_version.normalized_storage_path,
normalized_elements_count=current_version.normalized_elements_count,
normalized_seats_count=current_version.normalized_seats_count,
normalized_groups_count=current_version.normalized_groups_count,
normalized_sectors_count=current_version.normalized_sectors_count,
display_svg_storage_path=current_version.display_svg_storage_path,
display_svg_status=current_version.display_svg_status,
display_svg_generated_at=current_version.display_svg_generated_at,
)
session.add(new_version)
scheme.current_version_number = next_version_number
scheme.status = "draft"
scheme.published_at = None
scheme.normalized_elements_count = current_version.normalized_elements_count
scheme.normalized_seats_count = current_version.normalized_seats_count
scheme.normalized_groups_count = current_version.normalized_groups_count
scheme.normalized_sectors_count = current_version.normalized_sectors_count
await session.commit()
await session.refresh(new_version) await session.refresh(new_version)
return new_version return new_version
async def create_next_scheme_version_from_current_checked(
*,
scheme_id: str,
expected_current_scheme_version_id: str | None = None,
) -> tuple[SchemeVersionRecord, SchemeVersionRecord]:
async with AsyncSessionLocal() as session:
async with session.begin():
scheme = await _get_scheme_for_update(session, scheme_id)
current_version = await _get_current_scheme_version_for_update(
session,
scheme_id=scheme.scheme_id,
current_version_number=scheme.current_version_number,
)
if (
expected_current_scheme_version_id
and expected_current_scheme_version_id != current_version.scheme_version_id
):
_raise_stale_current_version(
expected_scheme_version_id=expected_current_scheme_version_id,
actual_scheme_version_id=current_version.scheme_version_id,
)
new_version = await _build_next_draft_version(
session,
scheme=scheme,
source_version=current_version,
)
await session.refresh(current_version)
await session.refresh(new_version)
return current_version, new_version
async def ensure_draft_scheme_version_consistent(
*,
scheme_id: str,
expected_current_scheme_version_id: str | None = None,
) -> tuple[SchemeVersionRecord, bool, str | None]:
async with AsyncSessionLocal() as session:
async with session.begin():
scheme = await _get_scheme_for_update(session, scheme_id)
current_version = await _get_current_scheme_version_for_update(
session,
scheme_id=scheme.scheme_id,
current_version_number=scheme.current_version_number,
)
if (
expected_current_scheme_version_id
and expected_current_scheme_version_id != current_version.scheme_version_id
):
_raise_stale_current_version(
expected_scheme_version_id=expected_current_scheme_version_id,
actual_scheme_version_id=current_version.scheme_version_id,
)
if scheme.status == "draft" and current_version.status == "draft":
await session.refresh(current_version)
return current_version, False, None
new_version = await _build_next_draft_version(
session,
scheme=scheme,
source_version=current_version,
)
source_scheme_version_id = current_version.scheme_version_id
await session.refresh(new_version)
return new_version, True, source_scheme_version_id

View File

@@ -6,6 +6,51 @@ from sqlalchemy import desc, func, select
from app.db.session import AsyncSessionLocal from app.db.session import AsyncSessionLocal
from app.models.scheme import SchemeRecord from app.models.scheme import SchemeRecord
from app.models.scheme_version import SchemeVersionRecord from app.models.scheme_version import SchemeVersionRecord
from app.services.api_errors import raise_conflict
def _raise_current_version_inconsistent(*, scheme_id: str, current_version_number: int) -> None:
raise_conflict(
code="current_version_inconsistent",
message="Scheme current version pointer is inconsistent with scheme_versions state.",
details={
"scheme_id": scheme_id,
"current_version_number": current_version_number,
},
)
async def _get_scheme_for_update(session, scheme_id: str) -> SchemeRecord:
scheme_result = await session.execute(
select(SchemeRecord)
.where(SchemeRecord.scheme_id == scheme_id)
.with_for_update()
)
scheme = scheme_result.scalar_one_or_none()
if scheme is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Scheme not found",
)
return scheme
async def _get_current_version_for_scheme(session, scheme: SchemeRecord) -> SchemeVersionRecord:
version_result = await session.execute(
select(SchemeVersionRecord)
.where(
SchemeVersionRecord.scheme_id == scheme.scheme_id,
SchemeVersionRecord.version_number == scheme.current_version_number,
)
.with_for_update()
)
version = version_result.scalar_one_or_none()
if version is None:
_raise_current_version_inconsistent(
scheme_id=scheme.scheme_id,
current_version_number=scheme.current_version_number,
)
return version
async def create_scheme_from_upload( async def create_scheme_from_upload(
@@ -37,6 +82,55 @@ async def create_scheme_from_upload(
return scheme_id return scheme_id
async def create_scheme_from_upload_with_initial_version(
*,
source_upload_id: str,
name: str,
normalized_storage_path: str,
normalized_elements_count: int,
normalized_seats_count: int,
normalized_groups_count: int,
normalized_sectors_count: int,
display_svg_storage_path: str | None = None,
display_svg_status: str = "pending",
display_svg_generated_at=None,
) -> tuple[str, str]:
scheme_id = uuid4().hex
scheme_version_id = uuid4().hex
async with AsyncSessionLocal() as session:
scheme = SchemeRecord(
scheme_id=scheme_id,
source_upload_id=source_upload_id,
name=name,
status="draft",
current_version_number=1,
normalized_elements_count=normalized_elements_count,
normalized_seats_count=normalized_seats_count,
normalized_groups_count=normalized_groups_count,
normalized_sectors_count=normalized_sectors_count,
)
version = SchemeVersionRecord(
scheme_version_id=scheme_version_id,
scheme_id=scheme_id,
version_number=1,
status="draft",
normalized_storage_path=normalized_storage_path,
normalized_elements_count=normalized_elements_count,
normalized_seats_count=normalized_seats_count,
normalized_groups_count=normalized_groups_count,
normalized_sectors_count=normalized_sectors_count,
display_svg_storage_path=display_svg_storage_path,
display_svg_status=display_svg_status,
display_svg_generated_at=display_svg_generated_at,
)
session.add(scheme)
session.add(version)
await session.commit()
return scheme_id, scheme_version_id
async def list_scheme_records(limit: int = 50, offset: int = 0) -> list[SchemeRecord]: async def list_scheme_records(limit: int = 50, offset: int = 0) -> list[SchemeRecord]:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
result = await session.execute( result = await session.execute(
@@ -72,127 +166,60 @@ async def get_scheme_record_by_scheme_id(scheme_id: str) -> SchemeRecord:
async def publish_scheme(scheme_id: str) -> SchemeRecord: async def publish_scheme(scheme_id: str) -> SchemeRecord:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
scheme_result = await session.execute( async with session.begin():
select(SchemeRecord).where(SchemeRecord.scheme_id == scheme_id) scheme = await _get_scheme_for_update(session, scheme_id)
) version = await _get_current_version_for_scheme(session, scheme)
scheme = scheme_result.scalar_one_or_none() scheme.status = "published"
scheme.published_at = func.now()
version.status = "published"
if scheme is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Scheme not found",
)
version_result = await session.execute(
select(SchemeVersionRecord).where(
SchemeVersionRecord.scheme_id == scheme.scheme_id,
SchemeVersionRecord.version_number == scheme.current_version_number,
)
)
version = version_result.scalar_one_or_none()
if version is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Current scheme version not found",
)
scheme.status = "published"
scheme.published_at = func.now()
version.status = "published"
await session.commit()
await session.refresh(scheme) await session.refresh(scheme)
return scheme return scheme
async def unpublish_scheme(scheme_id: str) -> SchemeRecord: async def unpublish_scheme(scheme_id: str) -> SchemeRecord:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
scheme_result = await session.execute( async with session.begin():
select(SchemeRecord).where(SchemeRecord.scheme_id == scheme_id) scheme = await _get_scheme_for_update(session, scheme_id)
) version = await _get_current_version_for_scheme(session, scheme)
scheme = scheme_result.scalar_one_or_none() scheme.status = "draft"
scheme.published_at = None
version.status = "draft"
if scheme is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Scheme not found",
)
version_result = await session.execute(
select(SchemeVersionRecord).where(
SchemeVersionRecord.scheme_id == scheme.scheme_id,
SchemeVersionRecord.version_number == scheme.current_version_number,
)
)
version = version_result.scalar_one_or_none()
if version is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Current scheme version not found",
)
scheme.status = "draft"
scheme.published_at = None
version.status = "draft"
await session.commit()
await session.refresh(scheme) await session.refresh(scheme)
return scheme return scheme
async def rollback_scheme_to_version(scheme_id: str, target_version_number: int) -> SchemeRecord: async def rollback_scheme_to_version(scheme_id: str, target_version_number: int) -> SchemeRecord:
async with AsyncSessionLocal() as session: async with AsyncSessionLocal() as session:
scheme_result = await session.execute( async with session.begin():
select(SchemeRecord).where(SchemeRecord.scheme_id == scheme_id) scheme = await _get_scheme_for_update(session, scheme_id)
) current_version = await _get_current_version_for_scheme(session, scheme)
scheme = scheme_result.scalar_one_or_none()
if scheme is None: target_result = await session.execute(
raise HTTPException( select(SchemeVersionRecord).where(
status_code=status.HTTP_404_NOT_FOUND, SchemeVersionRecord.scheme_id == scheme.scheme_id,
detail="Scheme not found", SchemeVersionRecord.version_number == target_version_number,
)
) )
target_version = target_result.scalar_one_or_none()
target_result = await session.execute( if target_version is None:
select(SchemeVersionRecord).where( raise HTTPException(
SchemeVersionRecord.scheme_id == scheme.scheme_id, status_code=status.HTTP_404_NOT_FOUND,
SchemeVersionRecord.version_number == target_version_number, detail="Target scheme version not found",
) )
)
target_version = target_result.scalar_one_or_none()
if target_version is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Target scheme version not found",
)
current_result = await session.execute(
select(SchemeVersionRecord).where(
SchemeVersionRecord.scheme_id == scheme.scheme_id,
SchemeVersionRecord.version_number == scheme.current_version_number,
)
)
current_version = current_result.scalar_one_or_none()
if current_version is not None:
current_version.status = "draft" current_version.status = "draft"
target_version.status = "draft"
scheme.current_version_number = target_version.version_number
scheme.status = "draft"
scheme.published_at = None
target_version.status = "draft" scheme.normalized_elements_count = target_version.normalized_elements_count
scheme.current_version_number = target_version.version_number scheme.normalized_seats_count = target_version.normalized_seats_count
scheme.status = "draft" scheme.normalized_groups_count = target_version.normalized_groups_count
scheme.published_at = None scheme.normalized_sectors_count = target_version.normalized_sectors_count
scheme.normalized_elements_count = target_version.normalized_elements_count
scheme.normalized_seats_count = target_version.normalized_seats_count
scheme.normalized_groups_count = target_version.normalized_groups_count
scheme.normalized_sectors_count = target_version.normalized_sectors_count
await session.commit()
await session.refresh(scheme) await session.refresh(scheme)
return scheme return scheme

View File

@@ -14,7 +14,9 @@ def resolve_role(api_key: str) -> str | None:
return None return None
async def require_api_key(x_api_key: str | None = Header(default=None, alias="X-API-Key")) -> str: async def require_api_key(
x_api_key: str | None = Header(default=None, alias=settings.auth_header_name),
) -> str:
if not x_api_key: if not x_api_key:
raise HTTPException( raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, status_code=status.HTTP_401_UNAUTHORIZED,

View File

@@ -7,13 +7,21 @@ from app.services.api_errors import raise_unprocessable
def _raise_uniqueness_error(message: str, detail: dict | None = None) -> None: def _raise_uniqueness_error(message: str, detail: dict | None = None) -> None:
payload = detail or {"code": "editor_uniqueness_error", "message": message} if detail:
raise_unprocessable(**payload) code = detail.pop("code", "editor_uniqueness_error")
msg = detail.pop("message", message)
raise_unprocessable(code=code, message=msg, details=detail)
else:
raise_unprocessable(code="editor_uniqueness_error", message=message)
def _raise_reference_error(message: str, detail: dict | None = None) -> None: def _raise_reference_error(message: str, detail: dict | None = None) -> None:
payload = detail or {"code": "editor_reference_error", "message": message} if detail:
raise_unprocessable(**payload) code = detail.pop("code", "editor_reference_error")
msg = detail.pop("message", message)
raise_unprocessable(code=code, message=msg, details=detail)
else:
raise_unprocessable(code="editor_reference_error", message=message)
async def validate_single_seat_patch_uniqueness( async def validate_single_seat_patch_uniqueness(

View File

@@ -6,9 +6,8 @@ from app.repositories.scheme_sectors import list_scheme_version_sectors
from app.services.baseline_selector import select_baseline_scheme_version from app.services.baseline_selector import select_baseline_scheme_version
def _serialize_sector(row) -> dict: def _sector_compare_value(row) -> dict:
return { return {
"sector_record_id": row.sector_record_id,
"element_id": row.element_id, "element_id": row.element_id,
"sector_id": row.sector_id, "sector_id": row.sector_id,
"name": row.name, "name": row.name,
@@ -16,9 +15,14 @@ def _serialize_sector(row) -> dict:
} }
def _serialize_group(row) -> dict: def _sector_response_value(row) -> dict:
payload = _sector_compare_value(row)
payload["sector_record_id"] = row.sector_record_id
return payload
def _group_compare_value(row) -> dict:
return { return {
"group_record_id": row.group_record_id,
"element_id": row.element_id, "element_id": row.element_id,
"group_id": row.group_id, "group_id": row.group_id,
"name": row.name, "name": row.name,
@@ -26,9 +30,14 @@ def _serialize_group(row) -> dict:
} }
def _serialize_seat(row) -> dict: def _group_response_value(row) -> dict:
payload = _group_compare_value(row)
payload["group_record_id"] = row.group_record_id
return payload
def _seat_compare_value(row) -> dict:
return { return {
"seat_record_id": row.seat_record_id,
"element_id": row.element_id, "element_id": row.element_id,
"seat_id": row.seat_id, "seat_id": row.seat_id,
"sector_id": row.sector_id, "sector_id": row.sector_id,
@@ -38,19 +47,33 @@ def _serialize_seat(row) -> dict:
} }
def _build_diff(before_map: dict, after_map: dict) -> list[dict]: def _seat_response_value(row) -> dict:
keys = sorted(set(before_map.keys()) | set(after_map.keys())) payload = _seat_compare_value(row)
payload["seat_record_id"] = row.seat_record_id
return payload
def _build_diff(
*,
before_compare_map: dict,
after_compare_map: dict,
before_payload_map: dict,
after_payload_map: dict,
) -> list[dict]:
keys = sorted(set(before_payload_map.keys()) | set(after_payload_map.keys()))
result: list[dict] = [] result: list[dict] = []
for key in keys: for key in keys:
before = before_map.get(key) before_compare = before_compare_map.get(key)
after = after_map.get(key) after_compare = after_compare_map.get(key)
before_payload = before_payload_map.get(key)
after_payload = after_payload_map.get(key)
if before is None and after is not None: if before_compare is None and after_compare is not None:
status = "added" status = "added"
elif before is not None and after is None: elif before_compare is not None and after_compare is None:
status = "removed" status = "removed"
elif before != after: elif before_compare != after_compare:
status = "changed" status = "changed"
else: else:
status = "unchanged" status = "unchanged"
@@ -59,13 +82,22 @@ def _build_diff(before_map: dict, after_map: dict) -> list[dict]:
{ {
"key": key, "key": key,
"status": status, "status": status,
"before": before, "before": before_payload,
"after": after, "after": after_payload,
} }
) )
return result return result
def _sector_key(row) -> str:
return row.sector_id if row.sector_id else (row.element_id if row.element_id else row.sector_record_id)
def _group_key(row) -> str:
return row.group_id if row.group_id else (row.element_id if row.element_id else row.group_record_id)
def _seat_key(row) -> str:
return row.seat_id if row.seat_id else (row.element_id if row.element_id else row.seat_record_id)
async def build_structure_diff( async def build_structure_diff(
*, *,
scheme_id: str, scheme_id: str,
@@ -83,32 +115,68 @@ async def build_structure_diff(
draft_seats = await list_scheme_version_seats(draft_scheme_version_id) draft_seats = await list_scheme_version_seats(draft_scheme_version_id)
if baseline is None: if baseline is None:
baseline_sector_map = {} baseline_sector_compare_map = {}
baseline_group_map = {} baseline_group_compare_map = {}
baseline_seat_map = {} baseline_seat_compare_map = {}
baseline_sector_payload_map = {}
baseline_group_payload_map = {}
baseline_seat_payload_map = {}
baseline_scheme_version_id = None baseline_scheme_version_id = None
else: else:
baseline_scheme_version_id = baseline.scheme_version_id baseline_scheme_version_id = baseline.scheme_version_id
baseline_sector_map = { baseline_sectors = await list_scheme_version_sectors(baseline.scheme_version_id)
row.sector_record_id: _serialize_sector(row) baseline_groups = await list_scheme_version_groups(baseline.scheme_version_id)
for row in await list_scheme_version_sectors(baseline.scheme_version_id) baseline_seats = await list_scheme_version_seats(baseline.scheme_version_id)
baseline_sector_compare_map = {
_sector_key(row): _sector_compare_value(row)
for row in baseline_sectors
} }
baseline_group_map = { baseline_sector_payload_map = {
row.group_record_id: _serialize_group(row) _sector_key(row): _sector_response_value(row)
for row in await list_scheme_version_groups(baseline.scheme_version_id) for row in baseline_sectors
} }
baseline_seat_map = { baseline_group_compare_map = {
row.seat_record_id: _serialize_seat(row) _group_key(row): _group_compare_value(row)
for row in await list_scheme_version_seats(baseline.scheme_version_id) for row in baseline_groups
}
baseline_group_payload_map = {
_group_key(row): _group_response_value(row)
for row in baseline_groups
}
baseline_seat_compare_map = {
_seat_key(row): _seat_compare_value(row)
for row in baseline_seats
}
baseline_seat_payload_map = {
_seat_key(row): _seat_response_value(row)
for row in baseline_seats
} }
draft_sector_map = {row.sector_record_id: _serialize_sector(row) for row in draft_sectors} draft_sector_compare_map = {_sector_key(row): _sector_compare_value(row) for row in draft_sectors}
draft_group_map = {row.group_record_id: _serialize_group(row) for row in draft_groups} draft_sector_payload_map = {_sector_key(row): _sector_response_value(row) for row in draft_sectors}
draft_seat_map = {row.seat_record_id: _serialize_seat(row) for row in draft_seats} draft_group_compare_map = {_group_key(row): _group_compare_value(row) for row in draft_groups}
draft_group_payload_map = {_group_key(row): _group_response_value(row) for row in draft_groups}
draft_seat_compare_map = {_seat_key(row): _seat_compare_value(row) for row in draft_seats}
draft_seat_payload_map = {_seat_key(row): _seat_response_value(row) for row in draft_seats}
sector_diff = _build_diff(baseline_sector_map, draft_sector_map) sector_diff = _build_diff(
group_diff = _build_diff(baseline_group_map, draft_group_map) before_compare_map=baseline_sector_compare_map,
seat_diff = _build_diff(baseline_seat_map, draft_seat_map) after_compare_map=draft_sector_compare_map,
before_payload_map=baseline_sector_payload_map,
after_payload_map=draft_sector_payload_map,
)
group_diff = _build_diff(
before_compare_map=baseline_group_compare_map,
after_compare_map=draft_group_compare_map,
before_payload_map=baseline_group_payload_map,
after_payload_map=draft_group_payload_map,
)
seat_diff = _build_diff(
before_compare_map=baseline_seat_compare_map,
after_compare_map=draft_seat_compare_map,
before_payload_map=baseline_seat_payload_map,
after_payload_map=draft_seat_payload_map,
)
return { return {
"baseline_scheme_version_id": baseline_scheme_version_id, "baseline_scheme_version_id": baseline_scheme_version_id,

View File

@@ -17,17 +17,18 @@ export API_URL="http://127.0.0.1:9020"
export API_KEY="admin-local-dev-key" export API_KEY="admin-local-dev-key"
export SCHEME_ID="82086336d385427f9d56244f9e1dd772" export SCHEME_ID="82086336d385427f9d56244f9e1dd772"
## Main script ## Main scripts
Primary operator regression: Primary operator regressions:
`backend/scripts/smoke_regression.sh` - `backend/scripts/smoke_regression.sh`
- `backend/scripts/editor_mutation_regression.sh`
The script is expected to fail fast on any contract break or unexpected 5xx. The scripts are expected to fail fast on any contract break or unexpected 5xx.
## 1. Health / system ## 1. Health / system
- GET /healthz -> 200 - GET /healthz -> 200 (smoke uses a bounded retry/wait loop and fails explicitly if the API never becomes ready)
- GET /api/v1/ping -> 200 - GET /api/v1/ping -> 200
- GET /api/v1/db/ping -> 200 - GET /api/v1/db/ping -> 200
- GET /api/v1/manifest -> 200 - GET /api/v1/manifest -> 200
@@ -129,25 +130,33 @@ Validate:
- test seat preview explains selectable / has_price state - test seat preview explains selectable / has_price state
- priced test seat amount is serialized as string - priced test seat amount is serialized as string
## 9. Draft mutations and validation guards ## 9. Draft mutation regression
For current draft version: Use:
- `backend/scripts/editor_mutation_regression.sh`
- POST /api/v1/schemes/{scheme_id}/draft/sectors -> 200 or 422 This script checks:
- POST /api/v1/schemes/{scheme_id}/draft/groups -> 200 or 422 - create sector
- PATCH /api/v1/schemes/{scheme_id}/draft/seats/records/{seat_record_id} -> 200 or 422 - create group
- POST /api/v1/schemes/{scheme_id}/draft/seats/bulk -> 200 or 422 - patch seat
- POST /api/v1/schemes/{scheme_id}/draft/remap/preview -> 200 or 422 - bulk seat update
- POST /api/v1/schemes/{scheme_id}/draft/remap/apply -> 200 or 422 - patch sector
- POST /api/v1/schemes/{scheme_id}/draft/repair-references -> 200 - patch group
- duplicate entity validation paths
- stale draft conflict
- remap preview validation path
- repair references
- delete created sector/group
- post-mutation read-model consistency
Validate: Validate:
- duplicate ids return typed 422 - created entities are returned by API
- duplicate element binding returns typed 422 - patched draft records are actually changed
- unknown sector/group references return typed 422 - bulk update changes persisted fields
- remap without filters returns typed 422 - duplicate ids return 422
- stale expected_scheme_version_id returns typed 409 - stale expected_scheme_version_id returns typed 409
- published current version rejects draft mutations with typed draft_not_editable conflict - remap preview without filters returns typed 422
- post-mutation summary / validation / compare-preview remain readable and deterministic
## 10. Draft publish preview ## 10. Draft publish preview
@@ -190,7 +199,9 @@ Validate:
- artifact audit does not report orphan files or missing files for DB rows in normal state - artifact audit does not report orphan files or missing files for DB rows in normal state
- validation report is readable and deterministic - validation report is readable and deterministic
- pricing cleanup preview returns matched candidates and safe_to_delete_count - pricing cleanup preview returns matched candidates and safe_to_delete_count
- pricing cleanup dry-run returns deleted_count=0 and would_delete_count>0 - pricing cleanup dry-run returns deleted_count=0
- idempotent cleanup is valid in both states: `matched_total=0` with `would_delete_count=0`, or `matched_total>0` with `would_delete_count>0`
- smoke does not require cleanup dry-run to always find something to delete
- admin routes do not produce 500 for healthy scheme state - admin routes do not produce 500 for healthy scheme state
## 13. Audit trail ## 13. Audit trail
@@ -213,6 +224,7 @@ Regression is considered failed if any of the following happen:
- publish readiness returns 500 - publish readiness returns 500
- editor context or draft ensure returns 500 - editor context or draft ensure returns 500
- draft summary / structure / validation / compare-preview returns 500 - draft summary / structure / validation / compare-preview returns 500
- editor mutation regression returns non-zero exit code
- pricing bundle or diagnostics contract changes unexpectedly - pricing bundle or diagnostics contract changes unexpectedly
- admin audit/cleanup endpoints fail on healthy environment - admin audit/cleanup endpoints fail on healthy environment
- pricing cleanup dry-run mutates data - pricing cleanup dry-run mutates data
@@ -230,3 +242,4 @@ Run this checklist after:
- draft lifecycle changes - draft lifecycle changes
- publish readiness changes - publish readiness changes
- admin cleanup changes - admin cleanup changes
- editor mutation changes

View File

@@ -0,0 +1,274 @@
#!/usr/bin/env bash
# Editor mutation regression: exercises the draft sector/group/seat mutation
# endpoints end-to-end against a running backend and fails fast on any
# unexpected HTTP status (see request() / fail()).
set -Eeuo pipefail
# Connection settings; each is overridable from the environment.
API_URL="${API_URL:-http://127.0.0.1:9020}"
API_KEY="${API_KEY:-admin-local-dev-key}"
SCHEME_ID="${SCHEME_ID:-82086336d385427f9d56244f9e1dd772}"
# Scratch directory for per-request response bodies and status codes;
# cleaned up automatically on any exit path.
TMP_DIR="$(mktemp -d)"
trap 'rm -rf "${TMP_DIR}"' EXIT
# Print a visually separated section banner so the regression log is scannable.
log() {
  printf '\n===== %s =====\n' "$*"
}
# Report a fatal regression failure on stderr and stop the whole script.
# The leading blank line (stdout, like the original) separates the failure
# from the preceding request dump.
fail() {
  echo
  printf '[FAIL] %s\n' "$*" >&2
  exit 1
}
# request NAME METHOD URL [BODY] [EXPECTED_STATUS]
#
# Perform one API call, echo "[METHOD] URL -> CODE" plus the response body,
# and abort the run (via fail) when the HTTP status differs from
# EXPECTED_STATUS (default 200).  The response body and status code are
# persisted as ${TMP_DIR}/NAME.body and ${TMP_DIR}/NAME.code so later
# assertions (json_get / assert_json_eq) can inspect them.
request() {
  local name="$1"
  local method="$2"
  local url="$3"
  local body="${4:-}"
  local expected="${5:-200}"
  local body_file="${TMP_DIR}/${name}.body"
  local code_file="${TMP_DIR}/${name}.code"
  # Build the curl invocation once; the JSON payload flags are appended only
  # when a body was supplied, so body-less calls behave exactly as before.
  local curl_args=(
    -sS
    -X "${method}"
    -H "X-API-Key: ${API_KEY}"
    -o "${body_file}"
    -w "%{http_code}"
  )
  if [[ -n "${body}" ]]; then
    curl_args+=(-H "Content-Type: application/json" --data "${body}")
  fi
  curl "${curl_args[@]}" "${url}" > "${code_file}"
  local code
  code="$(cat "${code_file}")"
  echo "[${method}] ${url} -> ${code}"
  cat "${body_file}"
  echo
  if [[ "${code}" != "${expected}" ]]; then
    fail "Unexpected HTTP status for ${name}: expected ${expected}, got ${code}"
  fi
}
# json_get FILE EXPR
#
# Extract a value from a JSON file using a tiny dotted-path language:
#   foo.bar          -> data["foo"]["bar"]
#   items.0.id       -> data["items"][0]["id"]
#   xs.[k!=null].v   -> first element of xs whose "k" is not null, then ["v"]
#   xs.[k==null].v   -> first element of xs whose "k" is null
#   xs.[LAST].v      -> last element of xs
#   xs.[...]         -> (any other bracket form) first element of xs
# A missing key, out-of-range index, or unmatched filter prints an empty
# string instead of crashing with a Python traceback, so callers can rely
# on "" meaning "absent" (previously only the filter case was guarded;
# a plain missing dict key raised KeyError and killed the run outside the
# script's own fail() path).  Booleans print as JSON "true"/"false".
json_get() {
  local file="$1"
  local expr="$2"
  python3 - <<PY
import json
from pathlib import Path

data = json.loads(Path("${file}").read_text())
expr = "${expr}"

value = data
for part in expr.split("."):
    if not part:
        continue
    try:
        if part.startswith("[") and part.endswith("]"):
            cond = part[1:-1]
            if cond.endswith("!=null"):
                key = cond[:-6]
                value = next(item for item in value if item.get(key) is not None)
            elif cond.endswith("==null"):
                key = cond[:-6]
                value = next(item for item in value if item.get(key) is None)
            elif cond == "LAST":
                value = value[-1]
            else:
                value = value[0]
        elif part.isdigit():
            value = value[int(part)]
        else:
            value = value[part] if value else None
    except (StopIteration, KeyError, IndexError, TypeError, AttributeError):
        # Any lookup miss degrades to "absent" rather than a traceback.
        value = None
        break

if value is None:
    print("")
elif isinstance(value, bool):
    print("true" if value else "false")
else:
    print(value)
PY
}
# assert_json_eq FILE EXPR EXPECTED
# Compare the json_get extraction for EXPR against EXPECTED; log "[OK]" on
# match, otherwise abort the run with a descriptive failure.
assert_json_eq() {
  local file="$1" expr="$2" expected="$3"
  local actual
  actual="$(json_get "${file}" "${expr}")"
  if [[ "${actual}" == "${expected}" ]]; then
    echo "[OK] ${expr}=${actual}"
  else
    fail "Assertion failed: ${expr} expected '${expected}', got '${actual}'"
  fi
}
# Fetch the current published version pointer for SCHEME_ID and export
# CURRENT_VERSION_ID / CURRENT_STATUS (also echoed for the log).
extract_current() {
  local response="${TMP_DIR}/current.body"
  request "current" "GET" "${API_URL}/api/v1/schemes/${SCHEME_ID}/current" "" "200"
  CURRENT_VERSION_ID="$(json_get "${response}" "scheme_version_id")"
  CURRENT_STATUS="$(json_get "${response}" "status")"
  printf 'CURRENT_VERSION_ID=%s\n' "${CURRENT_VERSION_ID}"
  printf 'CURRENT_STATUS=%s\n' "${CURRENT_STATUS}"
}
# Ensure a draft version exists for SCHEME_ID and export DRAFT_VERSION_ID /
# DRAFT_CREATED (also echoed for the log).
ensure_draft() {
  local response="${TMP_DIR}/ensure_draft.body"
  request "ensure_draft" "POST" "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/ensure" "" "200"
  DRAFT_VERSION_ID="$(json_get "${response}" "scheme_version_id")"
  DRAFT_CREATED="$(json_get "${response}" "created")"
  printf 'DRAFT_VERSION_ID=%s\n' "${DRAFT_VERSION_ID}"
  printf 'DRAFT_CREATED=%s\n' "${DRAFT_CREATED}"
}
# Read the draft structure and export the identifiers of the first seat that
# has a non-null seat_id (SEAT_RECORD_ID / SEAT_ID / ORIG_SEAT_NUMBER) for
# use by the mutation steps below.
read_structure() {
  local response="${TMP_DIR}/draft_structure.body"
  request "draft_structure" "GET" \
    "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/structure?expected_scheme_version_id=${DRAFT_VERSION_ID}" \
    "" "200"
  SEAT_RECORD_ID="$(json_get "${response}" "seats.[seat_id!=null].seat_record_id")"
  SEAT_ID="$(json_get "${response}" "seats.[seat_id!=null].seat_id")"
  ORIG_SEAT_NUMBER="$(json_get "${response}" "seats.[seat_id!=null].seat_number")"
  printf 'SEAT_RECORD_ID=%s\n' "${SEAT_RECORD_ID}"
  printf 'SEAT_ID=%s\n' "${SEAT_ID}"
  printf 'ORIG_SEAT_NUMBER=%s\n' "${ORIG_SEAT_NUMBER}"
}
# Verify the three draft read models (summary / validation / compare-preview)
# respond 200 for the current draft and report the expected version ids.
check_read_models() {
  local endpoint
  for endpoint in summary validation compare-preview; do
    request "draft_${endpoint//-/_}" "GET" \
      "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/${endpoint}?expected_scheme_version_id=${DRAFT_VERSION_ID}" \
      "" "200"
  done
  assert_json_eq "${TMP_DIR}/draft_summary.body" "scheme_version_id" "${DRAFT_VERSION_ID}"
  assert_json_eq "${TMP_DIR}/draft_validation.body" "scheme_version_id" "${DRAFT_VERSION_ID}"
  assert_json_eq "${TMP_DIR}/draft_compare_preview.body" "draft_scheme_version_id" "${DRAFT_VERSION_ID}"
}
# ---- Phase 0: availability and draft context ------------------------------
log "health"
curl -fsS "${API_URL}/healthz" >/dev/null || fail "healthz failed"
echo "[OK] healthz"
log "current + ensure draft"
extract_current
ensure_draft
read_structure
check_read_models
# Unique per-run ids so repeated runs never collide with earlier test data.
STAMP="$(date +%s)"
TEST_SECTOR_ID="reg-sector-${STAMP}"
TEST_GROUP_ID="reg-group-${STAMP}"
TEST_SECTOR_ELEMENT_ID="reg-sector-element-${STAMP}"
TEST_GROUP_ELEMENT_ID="reg-group-element-${STAMP}"
# ---- Phase 1: create entities and verify persisted state ------------------
log "create sector"
request "create_sector" "POST" \
  "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/sectors?expected_scheme_version_id=${DRAFT_VERSION_ID}" \
  "{\"element_id\":\"${TEST_SECTOR_ELEMENT_ID}\",\"sector_id\":\"${TEST_SECTOR_ID}\",\"name\":\"${TEST_SECTOR_ID}\"}" \
  "200"
CREATE_SECTOR_RECORD_ID="$(json_get "${TMP_DIR}/create_sector.body" "sector_record_id")"
echo "CREATE_SECTOR_RECORD_ID=${CREATE_SECTOR_RECORD_ID}"
log "create group"
request "create_group" "POST" \
  "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/groups?expected_scheme_version_id=${DRAFT_VERSION_ID}" \
  "{\"element_id\":\"${TEST_GROUP_ELEMENT_ID}\",\"group_id\":\"${TEST_GROUP_ID}\",\"name\":\"${TEST_GROUP_ID}\"}" \
  "200"
CREATE_GROUP_RECORD_ID="$(json_get "${TMP_DIR}/create_group.body" "group_record_id")"
echo "CREATE_GROUP_RECORD_ID=${CREATE_GROUP_RECORD_ID}"
# ---- Phase 2: patch mutations with read-back verification -----------------
log "patch seat -> bind to new group"
request "patch_seat_group" "PATCH" \
  "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/seats/records/${SEAT_RECORD_ID}?expected_scheme_version_id=${DRAFT_VERSION_ID}" \
  "{\"group_id\":\"${TEST_GROUP_ID}\"}" \
  "200"
log "verify seat after patch"
request "seat_after_patch" "GET" \
  "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/seats/records/${SEAT_RECORD_ID}?expected_scheme_version_id=${DRAFT_VERSION_ID}" \
  "" "200"
assert_json_eq "${TMP_DIR}/seat_after_patch.body" "group_id" "${TEST_GROUP_ID}"
# Untouched field must survive the partial update.
assert_json_eq "${TMP_DIR}/seat_after_patch.body" "seat_number" "${ORIG_SEAT_NUMBER}"
log "patch group name"
request "patch_group" "PATCH" \
  "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/groups/records/${CREATE_GROUP_RECORD_ID}?expected_scheme_version_id=${DRAFT_VERSION_ID}" \
  "{\"name\":\"${TEST_GROUP_ID}-updated\"}" \
  "200"
log "patch sector name"
request "patch_sector" "PATCH" \
  "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/sectors/records/${CREATE_SECTOR_RECORD_ID}?expected_scheme_version_id=${DRAFT_VERSION_ID}" \
  "{\"name\":\"${TEST_SECTOR_ID}-updated\"}" \
  "200"
log "verify sector after patch"
request "sector_after_patch" "GET" \
  "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/sectors/records/${CREATE_SECTOR_RECORD_ID}?expected_scheme_version_id=${DRAFT_VERSION_ID}" \
  "" "200"
assert_json_eq "${TMP_DIR}/sector_after_patch.body" "name" "${TEST_SECTOR_ID}-updated"
assert_json_eq "${TMP_DIR}/sector_after_patch.body" "sector_id" "${TEST_SECTOR_ID}"
log "bulk seat update validation path"
request "bulk_seats" "POST" \
  "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/seats/bulk?expected_scheme_version_id=${DRAFT_VERSION_ID}" \
  "{\"items\":[{\"seat_record_id\":\"${SEAT_RECORD_ID}\",\"row_label\":\"ZZ\",\"seat_number\":\"999\"}]}" \
  "200"
log "verify seat after bulk patch"
request "seat_after_bulk" "GET" \
  "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/seats/records/${SEAT_RECORD_ID}?expected_scheme_version_id=${DRAFT_VERSION_ID}" \
  "" "200"
assert_json_eq "${TMP_DIR}/seat_after_bulk.body" "row_label" "ZZ"
assert_json_eq "${TMP_DIR}/seat_after_bulk.body" "seat_number" "999"
# ---- Phase 3: typed error contracts (422 validation, 409 stale version) ---
log "typed error: duplicate sector id"
request "duplicate_sector" "POST" \
  "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/sectors?expected_scheme_version_id=${DRAFT_VERSION_ID}" \
  "{\"element_id\":\"dup-${TEST_SECTOR_ELEMENT_ID}\",\"sector_id\":\"${TEST_SECTOR_ID}\",\"name\":\"dup\"}" \
  "422"
log "typed error: duplicate group id"
request "duplicate_group" "POST" \
  "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/groups?expected_scheme_version_id=${DRAFT_VERSION_ID}" \
  "{\"element_id\":\"dup-${TEST_GROUP_ELEMENT_ID}\",\"group_id\":\"${TEST_GROUP_ID}\",\"name\":\"dup\"}" \
  "422"
log "typed error: stale draft version"
request "stale_patch" "PATCH" \
  "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/seats/records/${SEAT_RECORD_ID}?expected_scheme_version_id=deadbeefdeadbeefdeadbeefdeadbeef" \
  "{\"row_label\":\"STALE\"}" \
  "409"
log "typed error: remap preview without filters"
request "remap_preview_invalid" "POST" \
  "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/remap/preview?expected_scheme_version_id=${DRAFT_VERSION_ID}" \
  "{}" \
  "422"
# ---- Phase 4: maintenance endpoint and post-mutation consistency ----------
log "repair references"
request "repair_refs" "POST" \
  "${API_URL}/api/v1/schemes/${SCHEME_ID}/draft/repair-references?expected_scheme_version_id=${DRAFT_VERSION_ID}" \
  "{}" \
  "200"
log "post-mutation read models"
check_read_models
log "done"
echo "[OK] editor mutation regression completed successfully"

View File

@@ -7,6 +7,8 @@ SCHEME_ID="${SCHEME_ID:-82086336d385427f9d56244f9e1dd772}"
TMP_DIR="$(mktemp -d)" TMP_DIR="$(mktemp -d)"
trap 'rm -rf "${TMP_DIR}"' EXIT trap 'rm -rf "${TMP_DIR}"' EXIT
HEALTH_MAX_ATTEMPTS="${HEALTH_MAX_ATTEMPTS:-20}"
HEALTH_RETRY_DELAY_SECONDS="${HEALTH_RETRY_DELAY_SECONDS:-1}"
request() { request() {
local name="$1" local name="$1"
@@ -107,7 +109,25 @@ assert_json_int_gt() {
} }
echo "===== health =====" echo "===== health ====="
curl -i "${API_URL}/healthz" echo "waiting for API to be ready..."
health_ready="false"
for ((i = 1; i <= HEALTH_MAX_ATTEMPTS; i++)); do
health_status="$(curl -sS -o /dev/null -w "%{http_code}" "${API_URL}/healthz" || true)"
if [[ "${health_status}" == "200" ]]; then
health_ready="true"
echo "API is ready"
break
fi
echo "waiting... (${i}/${HEALTH_MAX_ATTEMPTS}) healthz=${health_status}"
sleep "${HEALTH_RETRY_DELAY_SECONDS}"
done
if [[ "${health_ready}" != "true" ]]; then
echo "[FAIL] API did not become ready on ${API_URL}/healthz after ${HEALTH_MAX_ATTEMPTS} attempts" >&2
exit 1
fi
curl -sS -i "${API_URL}/healthz"
request "ping" "GET" "${API_URL}/api/v1/ping" "200" request "ping" "GET" "${API_URL}/api/v1/ping" "200"
request "db_ping" "GET" "${API_URL}/api/v1/db/ping" "200" request "db_ping" "GET" "${API_URL}/api/v1/db/ping" "200"
@@ -194,7 +214,17 @@ request "admin_cleanup_dry_run" "POST" "${API_URL}/api/v1/admin/schemes/${SCHEME
assert_json_eq "${TMP_DIR}/admin_cleanup_dry_run.body" "dry_run" "true" assert_json_eq "${TMP_DIR}/admin_cleanup_dry_run.body" "dry_run" "true"
assert_json_eq "${TMP_DIR}/admin_cleanup_dry_run.body" "deleted_count" "0" assert_json_eq "${TMP_DIR}/admin_cleanup_dry_run.body" "deleted_count" "0"
assert_json_int_gt "${TMP_DIR}/admin_cleanup_dry_run.body" "would_delete_count" "0" MATCHED_TOTAL="$(json_get "${TMP_DIR}/admin_cleanup_dry_run.body" "matched_total")"
WOULD_DELETE="$(json_get "${TMP_DIR}/admin_cleanup_dry_run.body" "would_delete_count")"
if [[ "${MATCHED_TOTAL}" == "0" ]]; then
if [[ "${WOULD_DELETE}" != "0" ]]; then
echo "[FAIL] would_delete_count expected 0 when matched_total is 0, got ${WOULD_DELETE}" >&2
exit 1
fi
echo "[OK] matched_total=0, would_delete_count=0 (clean state)"
else
assert_json_int_gt "${TMP_DIR}/admin_cleanup_dry_run.body" "would_delete_count" "0"
fi
request "audit_trail" "GET" "${API_URL}/api/v1/schemes/${SCHEME_ID}/audit" "200" request "audit_trail" "GET" "${API_URL}/api/v1/schemes/${SCHEME_ID}/audit" "200"

View File

@@ -25,10 +25,11 @@ services:
container_name: svg-service container_name: svg-service
env_file: env_file:
- ./.env - ./.env
command: ["sh", "-c", "uvicorn app.main:app --host 0.0.0.0 --port ${BACKEND_PORT}"]
ports: ports:
- "9020:9020" - "${BACKEND_PORT}:${BACKEND_PORT}"
volumes: volumes:
- ./storage:/data - ./storage:${STORAGE_ROOT}
depends_on: depends_on:
postgres: postgres:
condition: service_healthy condition: service_healthy