first commit
This commit is contained in:
234
.gitea/workflows/ci-cd.yaml
Normal file
234
.gitea/workflows/ci-cd.yaml
Normal file
@@ -0,0 +1,234 @@
|
||||
# =============================================================================
|
||||
# BD FHIR National — Gitea Actions CI/CD Workflow
|
||||
#
|
||||
# Trigger: Push of a version tag matching v*.*.* (e.g. v1.0.0, v1.2.3)
|
||||
# Runner: Self-hosted Gitea Actions runner (separate VM, Docker installed)
|
||||
# Registry: Gitea Packages (built-in container registry)
|
||||
#
|
||||
# Image published to:
|
||||
# {GITEA_SERVER}/{GITEA_OWNER}/{GITEA_REPO}:{tag}
|
||||
# e.g. git.dghs.gov.bd/dghs/bd-fhir-national:v1.0.0
|
||||
#
|
||||
# REQUIRED SECRETS (set in Gitea → Repository → Settings → Secrets):
|
||||
# REGISTRY_USERNAME — Gitea username with write access to packages
|
||||
# REGISTRY_PASSWORD — Gitea personal access token (packages:write scope)
|
||||
# IG_PACKAGE_B64 — BD Core IG .tgz encoded as base64
|
||||
# Generate: base64 -w 0 bd.gov.dghs.core-0.2.1.tgz
|
||||
#
|
||||
# REQUIRED VARIABLES (set in Gitea → Repository → Settings → Variables):
|
||||
# IG_PACKAGE_FILENAME — exact filename, e.g. bd.gov.dghs.core-0.2.1.tgz
|
||||
# IG_VERSION — version string, e.g. 0.2.1
|
||||
#
|
||||
# HOW TO TAG AND TRIGGER A BUILD:
|
||||
# git tag v1.0.0
|
||||
# git push origin v1.0.0
|
||||
#
|
||||
# HOW TO UPDATE THE IG PACKAGE FOR A NEW IG VERSION:
|
||||
# 1. base64 -w 0 bd.gov.dghs.core-0.3.0.tgz | copy to IG_PACKAGE_B64 secret
|
||||
# 2. Update IG_PACKAGE_FILENAME variable to bd.gov.dghs.core-0.3.0.tgz
|
||||
# 3. Update IG_VERSION variable to 0.3.0
|
||||
# 4. Tag and push as normal
|
||||
# =============================================================================
|
||||
|
||||
name: Build and Publish HAPI Docker Image
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v*.*.*"
|
||||
|
||||
jobs:
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Job 1: Test
|
||||
# Runs Maven tests using TestContainers (real PostgreSQL 15, no H2).
|
||||
# Must pass before the image is built. A failing test never produces an image.
|
||||
# ---------------------------------------------------------------------------
|
||||
test:
|
||||
name: Run tests
|
||||
runs-on: ubuntu-latest # use your self-hosted runner label if configured
|
||||
# replace with: runs-on: self-hosted
|
||||
# if your runner has no specific label
|
||||
|
||||
steps:
|
||||
- name: Checkout source
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Java 17
|
||||
uses: actions/setup-java@v4
|
||||
with:
|
||||
java-version: "17"
|
||||
distribution: temurin
|
||||
cache: maven
|
||||
|
||||
# TestContainers requires Docker on the runner.
|
||||
# Your runner VM has Docker installed — this step verifies it.
|
||||
- name: Verify Docker available for TestContainers
|
||||
run: docker info
|
||||
|
||||
- name: Place IG package for tests
|
||||
# The IG package must be present before mvn test runs because
|
||||
# FhirServerConfig.validateIgPackagePresent() checks on startup.
|
||||
# Decoded from the base64 secret into the correct classpath location.
|
||||
run: |
|
||||
echo "${{ secrets.IG_PACKAGE_B64 }}" | base64 -d > \
|
||||
hapi-overlay/src/main/resources/packages/${{ vars.IG_PACKAGE_FILENAME }}
|
||||
echo "IG package placed: $(ls -lh hapi-overlay/src/main/resources/packages/)"
|
||||
|
||||
- name: Run Maven tests
|
||||
run: |
|
||||
mvn test \
|
||||
--batch-mode \
|
||||
--no-transfer-progress \
|
||||
-pl hapi-overlay \
|
||||
-am
|
||||
env:
|
||||
# TestContainers pulls postgres:15 from Docker Hub during tests.
|
||||
# If your runner has no internet access, pre-pull the image and
|
||||
# set TESTCONTAINERS_RYUK_DISABLED=true with a local image config.
|
||||
TESTCONTAINERS_RYUK_DISABLED: false
|
||||
|
||||
- name: Upload test reports on failure
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: surefire-reports
|
||||
path: hapi-overlay/target/surefire-reports/
|
||||
retention-days: 7
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Job 2: Build and publish Docker image
|
||||
# Only runs after test job passes.
|
||||
# Produces image tagged with both the git tag (v1.0.0) and the version
|
||||
# without the v prefix (1.0.0) for docker-compose .env compatibility.
|
||||
# ---------------------------------------------------------------------------
|
||||
build-and-push:
|
||||
name: Build and push image
|
||||
runs-on: ubuntu-latest # replace with: runs-on: self-hosted if needed
|
||||
needs: test
|
||||
|
||||
steps:
|
||||
- name: Checkout source
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Java 17
|
||||
uses: actions/setup-java@v4
|
||||
with:
|
||||
java-version: "17"
|
||||
distribution: temurin
|
||||
cache: maven
|
||||
|
||||
# Derive version strings from the git tag.
|
||||
# git tag v1.0.0 → TAG_VERSION=v1.0.0, PLAIN_VERSION=1.0.0
|
||||
- name: Extract version from tag
|
||||
id: version
|
||||
run: |
|
||||
TAG="${GITHUB_REF_NAME}"
|
||||
PLAIN="${TAG#v}"
|
||||
echo "tag_version=${TAG}" >> $GITHUB_OUTPUT
|
||||
echo "plain_version=${PLAIN}" >> $GITHUB_OUTPUT
|
||||
echo "git_commit=${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
|
||||
echo "build_timestamp=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> $GITHUB_OUTPUT
|
||||
echo "Tag: ${TAG}, Plain: ${PLAIN}, Commit: ${GITHUB_SHA::8}"
|
||||
|
||||
# Gitea Packages registry URL format:
|
||||
# {gitea_host}/v2/{owner}/{repo}/manifests/{tag}
|
||||
# Login uses: git.dghs.gov.bd as the registry host
# NOTE(review): gitea.server_url includes the scheme (e.g. https://git.dghs.gov.bd),
# but docker/login-action's `registry:` input expects a bare hostname. The
# expression below passes the full URL whenever server_url is non-empty —
# derive the hostname first (as done in the later "Derive image name" step)
# or hard-code the registry host here. Verify login succeeds on the runner.
|
||||
- name: Log in to Gitea Packages registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ gitea.server_url != '' && gitea.server_url || 'git.dghs.gov.bd' }}
|
||||
username: ${{ secrets.REGISTRY_USERNAME }}
|
||||
password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
|
||||
# Derive registry host from Gitea server URL.
|
||||
# Gitea Packages image path: {host}/{owner}/{repo}:{tag}
|
||||
- name: Derive image name
|
||||
id: image
|
||||
run: |
|
||||
# Extract hostname from Gitea server URL
|
||||
# e.g. https://git.dghs.gov.bd → git.dghs.gov.bd
|
||||
REGISTRY_HOST=$(echo "${{ gitea.server_url }}" | sed 's|https\?://||' | sed 's|/.*||')
|
||||
OWNER="${{ gitea.repository_owner }}"
|
||||
REPO="${{ gitea.repository }}"
|
||||
REPO_NAME="${REPO##*/}"
|
||||
IMAGE="${REGISTRY_HOST}/${OWNER}/${REPO_NAME}"
|
||||
echo "registry_host=${REGISTRY_HOST}" >> $GITHUB_OUTPUT
|
||||
echo "image=${IMAGE}" >> $GITHUB_OUTPUT
|
||||
echo "Image base: ${IMAGE}"
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Place IG package for Docker build
|
||||
run: |
|
||||
echo "${{ secrets.IG_PACKAGE_B64 }}" | base64 -d > \
|
||||
hapi-overlay/src/main/resources/packages/${{ vars.IG_PACKAGE_FILENAME }}
|
||||
echo "IG package placed: $(ls -lh hapi-overlay/src/main/resources/packages/)"
|
||||
|
||||
# Run Maven package to produce the fat JAR before Docker build.
|
||||
# The Dockerfile COPY expects hapi-overlay/target/bd-fhir-hapi.jar.
|
||||
# -DskipTests: tests already ran in the test job.
|
||||
- name: Build fat JAR
|
||||
run: |
|
||||
mvn package \
|
||||
--batch-mode \
|
||||
--no-transfer-progress \
|
||||
-pl hapi-overlay \
|
||||
-am \
|
||||
-DskipTests
|
||||
echo "JAR: $(ls -lh hapi-overlay/target/bd-fhir-hapi.jar)"
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
file: hapi-overlay/Dockerfile
|
||||
push: true
|
||||
# Tag with both v-prefixed tag and plain version.
|
||||
# docker-compose .env uses the plain version: HAPI_IMAGE=....:1.0.0
|
||||
# The v-prefixed tag matches the git tag for traceability.
|
||||
tags: |
|
||||
${{ steps.image.outputs.image }}:${{ steps.version.outputs.tag_version }}
|
||||
${{ steps.image.outputs.image }}:${{ steps.version.outputs.plain_version }}
|
||||
${{ steps.image.outputs.image }}:latest
|
||||
build-args: |
|
||||
IG_PACKAGE=${{ vars.IG_PACKAGE_FILENAME }}
|
||||
BUILD_VERSION=${{ steps.version.outputs.plain_version }}
|
||||
GIT_COMMIT=${{ steps.version.outputs.git_commit }}
|
||||
BUILD_TIMESTAMP=${{ steps.version.outputs.build_timestamp }}
|
||||
# Layer cache: use Gitea registry as cache backend.
|
||||
# Speeds up subsequent builds when only source changes (not POM/deps).
|
||||
cache-from: type=registry,ref=${{ steps.image.outputs.image }}:buildcache
|
||||
cache-to: type=registry,ref=${{ steps.image.outputs.image }}:buildcache,mode=max
|
||||
# Provenance attestation — disable for simpler Gitea registry compatibility
|
||||
provenance: false
|
||||
|
||||
- name: Print published image details
|
||||
run: |
|
||||
echo "================================================"
|
||||
echo "Image published successfully"
|
||||
echo "Registry: ${{ steps.image.outputs.registry_host }}"
|
||||
echo "Image: ${{ steps.image.outputs.image }}"
|
||||
echo "Tags:"
|
||||
echo " ${{ steps.version.outputs.tag_version }}"
|
||||
echo " ${{ steps.version.outputs.plain_version }}"
|
||||
echo " latest"
|
||||
echo "Git commit: ${{ steps.version.outputs.git_commit }}"
|
||||
echo "IG package: ${{ vars.IG_PACKAGE_FILENAME }}"
|
||||
echo "IG version: ${{ vars.IG_VERSION }}"
|
||||
echo "================================================"
|
||||
echo ""
|
||||
echo "To deploy on production server:"
|
||||
echo " nano /opt/bd-fhir-national/.env"
|
||||
echo " # Set: HAPI_IMAGE=${{ steps.image.outputs.image }}:${{ steps.version.outputs.plain_version }}"
|
||||
echo " docker compose --env-file .env pull hapi"
|
||||
echo " docker compose --env-file .env up -d --no-deps hapi"
|
||||
|
||||
# Clean up the IG package from the workspace.
|
||||
# The runner is shared — do not leave the binary on disk between builds.
|
||||
- name: Clean up IG package from workspace
|
||||
if: always()
|
||||
run: |
|
||||
rm -f hapi-overlay/src/main/resources/packages/*.tgz
|
||||
echo "IG package removed from runner workspace"
|
||||
11
.gitignore
vendored
Normal file
11
.gitignore
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
.DS_Store
|
||||
**/.DS_Store
|
||||
|
||||
# Environment secrets
|
||||
.env
|
||||
|
||||
# Maven build output
|
||||
hapi-overlay/target/
|
||||
|
||||
# IG package binary — managed by CI pipeline
|
||||
hapi-overlay/src/main/resources/packages/*.tgz
|
||||
41
README.md
Normal file
41
README.md
Normal file
@@ -0,0 +1,41 @@
|
||||
# BD FHIR National Repository & Validation Engine
|
||||
|
||||
This is the central interoperability gateway for the **Directorate General of Health Services (DGHS), Bangladesh**. It provides a high-performance FHIR R4 repository with mandatory validation against national clinical standards.
|
||||
|
||||
---
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
1. **Configure Environment:**
|
||||
`cp env.example .env` (See **Project Manifest** for variable descriptions).
|
||||
2. **Initialize Database:**
|
||||
Run `postgres/fhir/init.sh` and `postgres/audit/init.sh` on the host.
|
||||
3. **Deploy:**
|
||||
`docker compose up -d`
|
||||
|
||||
---
|
||||
|
||||
## 📚 Operational Documentation
|
||||
|
||||
All technical and procedural guides are located in the `ops/` directory. Refer to these before making changes to the production environment:
|
||||
|
||||
| Guide | Description |
|
||||
| --- | --- |
|
||||
| **[Project Manifest](https://git.dghs.gov.bd/riaz.somc/bd-fhir-national/ops/project-manifest.md)** | Full file inventory, architectural decisions, and pre-flight checklist. |
|
||||
| **[Deployment Guide](https://git.dghs.gov.bd/riaz.somc/bd-fhir-national/ops/deployment-guide.md)** | Step-by-step setup for Ubuntu 22.04+ and nine mandatory acceptance tests. |
|
||||
| **[Keycloak Setup](https://git.dghs.gov.bd/riaz.somc/bd-fhir-national/ops/keycloak-setup.md)** | Instructions for realm roles, client mappers, and JWT authentication. |
|
||||
| **[Adding Additional IGs](https://git.dghs.gov.bd/riaz.somc/bd-fhir-national/ops/adding-additional-igs.md)** | How to load specialized IGs (MCCoD, IMCI) without breaking the core. |
|
||||
| **[Version Upgrade](https://git.dghs.gov.bd/riaz.somc/bd-fhir-national/ops/version-upgrade-integration.md)** | Procedure for ICD-11 version updates and terminology cache flushing. |
|
||||
| **[Scaling Roadmap](https://git.dghs.gov.bd/riaz.somc/bd-fhir-national/ops/scaling-roadmap.md)** | Thresholds for resource allocation and database partitioning. |
|
||||
| **[Technical Ops](https://git.dghs.gov.bd/riaz.somc/bd-fhir-national/ops/technical-operations-document.md)** | Runbooks for backup, recovery, and daily maintenance. |
|
||||
|
||||
---
|
||||
|
||||
## 🛠 Tech Stack
|
||||
|
||||
* **Engine:** HAPI FHIR 7.2.0 (R4) on Spring Boot 3.2
|
||||
* **Database:** PostgreSQL 15 + pgBouncer (Session Mode)
|
||||
* **Auth:** Keycloak (HRIS Realm)
|
||||
* **CI/CD:** Gitea Actions
|
||||
|
||||
---
|
||||
466
docker-compose.yml
Normal file
466
docker-compose.yml
Normal file
@@ -0,0 +1,466 @@
|
||||
# =============================================================================
|
||||
# BD FHIR National — Production docker-compose.yml
|
||||
#
|
||||
# USAGE:
|
||||
# # First deploy:
|
||||
# docker-compose --env-file .env up -d
|
||||
#
|
||||
# # Scale HAPI replicas (pilot: 1, production: 3):
|
||||
# docker-compose --env-file .env up -d --scale hapi=3
|
||||
#
|
||||
# # Pull updated image and redeploy zero-downtime:
|
||||
# docker-compose --env-file .env pull hapi
|
||||
# docker-compose --env-file .env up -d --no-deps --scale hapi=3 hapi
|
||||
#
|
||||
# # View logs:
|
||||
# docker-compose logs -f hapi
|
||||
#
|
||||
# REQUIRED: .env file in same directory as this file.
|
||||
# Copy env.example to .env and fill in all values before first deploy.
|
||||
# NEVER commit .env to version control.
|
||||
#
|
||||
# =============================================================================
|
||||
# SCALING ROADMAP
|
||||
# =============================================================================
|
||||
#
|
||||
# PHASE 1 — Pilot (<50 vendors, <10,000 resources/day)
|
||||
# hapi replicas: 1
|
||||
# postgres-fhir: 1 instance, no replication
|
||||
# postgres-audit: 1 instance, no replication
|
||||
# pgbouncer: 1 instance
|
||||
# Expected load: ~0.1 req/s average, ~5 req/s burst
|
||||
# This docker-compose file as written.
|
||||
#
|
||||
# PHASE 2 — Regional rollout (<500 vendors, <100,000 resources/day)
|
||||
# hapi replicas: 3 (--scale hapi=3, no other changes needed)
|
||||
# postgres-fhir: Add streaming replication replica for read queries.
|
||||
# Change: add postgres-fhir-replica service,
|
||||
# configure HAPI read datasource to replica.
|
||||
# postgres-audit: Add streaming replication replica.
|
||||
# pgbouncer: Scale to 2 instances behind a VIP.
|
||||
# nginx: Already stateless. Add second nginx instance.
|
||||
# Session storage: Add Redis for distributed JWKS cache
|
||||
# (currently per-replica in-memory — acceptable at Phase 1).
|
||||
# Changes needed: Add postgres-fhir-replica, postgres-audit-replica,
|
||||
# redis services. Update HAPI datasource config.
|
||||
# Add pgBouncer VIP (HAProxy or keepalived).
|
||||
#
|
||||
# PHASE 3 — National rollout (>500 vendors, >1,000,000 resources/day)
|
||||
# Move to Kubernetes (K8s) or Docker Swarm.
|
||||
# docker-compose is not the right orchestrator at this scale.
|
||||
# Kubernetes equivalents:
|
||||
# hapi → Deployment with HPA (autoscale on CPU/RPS)
|
||||
# postgres-fhir → Patroni cluster (HA PostgreSQL)
|
||||
# postgres-audit → Patroni cluster or managed RDS equivalent
|
||||
# pgbouncer → PgBouncer in K8s sidecar or pgBouncer-as-a-service
|
||||
# nginx → Ingress controller (nginx-ingress or Traefik)
|
||||
# At this phase, partition HAPI JPA tables (see V1 migration comments).
|
||||
# Estimated trigger: 5M total resources in HFJ_RESOURCE.
|
||||
#
|
||||
# =============================================================================
|
||||
|
||||
version: "3.9"
|
||||
|
||||
# =============================================================================
|
||||
# NETWORKS
|
||||
# Isolate services: only nginx is reachable from outside.
|
||||
# hapi is not directly reachable — only via nginx.
|
||||
# postgres services are not reachable from nginx — only from hapi/pgbouncer.
|
||||
# =============================================================================
|
||||
networks:
|
||||
|
||||
# Frontend: nginx ↔ hapi
|
||||
frontend:
|
||||
driver: bridge
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.20.1.0/24
|
||||
|
||||
# Backend-fhir: hapi ↔ pgbouncer-fhir ↔ postgres-fhir
|
||||
backend-fhir:
|
||||
driver: bridge
|
||||
internal: true # no external internet access
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.20.2.0/24
|
||||
|
||||
# Backend-audit: hapi ↔ pgbouncer-audit ↔ postgres-audit
|
||||
backend-audit:
|
||||
driver: bridge
|
||||
internal: true
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.20.3.0/24
|
||||
|
||||
# =============================================================================
|
||||
# VOLUMES
|
||||
# Named volumes survive container restarts and image upgrades.
|
||||
# Never use bind mounts for database data in production.
|
||||
# =============================================================================
|
||||
volumes:
|
||||
postgres-fhir-data:
|
||||
driver: local
|
||||
postgres-audit-data:
|
||||
driver: local
|
||||
hapi-logs:
|
||||
driver: local
|
||||
|
||||
# =============================================================================
|
||||
# SERVICES
|
||||
# =============================================================================
|
||||
services:
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# postgres-fhir
|
||||
# HAPI JPA store. Contains all FHIR resources.
|
||||
# Read-write datasource for HAPI.
|
||||
# ---------------------------------------------------------------------------
|
||||
postgres-fhir:
|
||||
image: postgres:15-alpine
|
||||
container_name: bd-postgres-fhir
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- backend-fhir
|
||||
volumes:
|
||||
- postgres-fhir-data:/var/lib/postgresql/data
|
||||
# Custom postgresql.conf tuned for HAPI workload
|
||||
- ./postgres/fhir/postgresql.conf:/etc/postgresql/postgresql.conf:ro
|
||||
# Init script: create application user with limited privileges
|
||||
- ./postgres/fhir/init.sql:/docker-entrypoint-initdb.d/init.sql:ro
|
||||
environment:
|
||||
POSTGRES_DB: ${FHIR_DB_NAME}
|
||||
POSTGRES_USER: ${FHIR_DB_SUPERUSER}
|
||||
POSTGRES_PASSWORD: ${FHIR_DB_SUPERUSER_PASSWORD}
|
||||
command: postgres -c config_file=/etc/postgresql/postgresql.conf
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U ${FHIR_DB_SUPERUSER} -d ${FHIR_DB_NAME}"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 30s
|
||||
# Resource limits — PostgreSQL should not starve HAPI of memory
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 2G
|
||||
reservations:
|
||||
memory: 512M
|
||||
# Do NOT expose port 5432 to host — only accessible via backend-fhir network
|
||||
# If you need psql access for maintenance, use:
|
||||
# docker exec -it bd-postgres-fhir psql -U ${FHIR_DB_SUPERUSER} -d ${FHIR_DB_NAME}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# postgres-audit
|
||||
# Audit store. Contains audit_events and fhir_rejected_submissions.
|
||||
# INSERT-only datasource for HAPI (audit_writer role).
|
||||
# Completely separate from FHIR store — different container, different volume.
|
||||
# ---------------------------------------------------------------------------
|
||||
postgres-audit:
|
||||
image: postgres:15-alpine
|
||||
container_name: bd-postgres-audit
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- backend-audit
|
||||
volumes:
|
||||
- postgres-audit-data:/var/lib/postgresql/data
|
||||
- ./postgres/audit/postgresql.conf:/etc/postgresql/postgresql.conf:ro
|
||||
- ./postgres/audit/init.sql:/docker-entrypoint-initdb.d/init.sql:ro
|
||||
environment:
|
||||
POSTGRES_DB: ${AUDIT_DB_NAME}
|
||||
POSTGRES_USER: ${AUDIT_DB_SUPERUSER}
|
||||
POSTGRES_PASSWORD: ${AUDIT_DB_SUPERUSER_PASSWORD}
|
||||
command: postgres -c config_file=/etc/postgresql/postgresql.conf
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U ${AUDIT_DB_SUPERUSER} -d ${AUDIT_DB_NAME}"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 30s
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 1G
|
||||
reservations:
|
||||
memory: 256M
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# pgbouncer-fhir
|
||||
# Connection pool between HAPI and postgres-fhir.
|
||||
# Session mode — required for Hibernate prepared statements.
|
||||
# pool_size=20: at 3 HAPI replicas with HikariCP maxPool=5,
|
||||
# max PostgreSQL connections = 15. pool_size=20 gives 5 headroom.
|
||||
# ---------------------------------------------------------------------------
|
||||
pgbouncer-fhir:
|
||||
image: bitnami/pgbouncer:1.22.1
|
||||
container_name: bd-pgbouncer-fhir
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- backend-fhir
|
||||
environment:
|
||||
POSTGRESQL_HOST: postgres-fhir
|
||||
POSTGRESQL_PORT: "5432"
|
||||
POSTGRESQL_DATABASE: ${FHIR_DB_NAME}
|
||||
POSTGRESQL_USERNAME: ${FHIR_DB_APP_USER}
|
||||
POSTGRESQL_PASSWORD: ${FHIR_DB_APP_PASSWORD}
|
||||
PGBOUNCER_DATABASE: ${FHIR_DB_NAME}
|
||||
PGBOUNCER_POOL_MODE: session
|
||||
PGBOUNCER_MAX_CLIENT_CONN: "100"
|
||||
PGBOUNCER_DEFAULT_POOL_SIZE: "20"
|
||||
PGBOUNCER_MIN_POOL_SIZE: "5"
|
||||
PGBOUNCER_RESERVE_POOL_SIZE: "5"
|
||||
PGBOUNCER_RESERVE_POOL_TIMEOUT: "5"
|
||||
PGBOUNCER_SERVER_IDLE_TIMEOUT: "600"
|
||||
PGBOUNCER_CLIENT_IDLE_TIMEOUT: "60"
|
||||
# Logging — errors and connections only, not queries (query logging
|
||||
# would log patient data to container stdout)
|
||||
PGBOUNCER_LOG_CONNECTIONS: "1"
|
||||
PGBOUNCER_LOG_DISCONNECTIONS: "1"
|
||||
PGBOUNCER_LOG_POOLER_ERRORS: "1"
|
||||
PGBOUNCER_VERBOSE: "0"
|
||||
depends_on:
|
||||
postgres-fhir:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -h localhost -p 5432 -U ${FHIR_DB_APP_USER}"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# pgbouncer-audit
|
||||
# Connection pool between HAPI and postgres-audit.
|
||||
# Smaller pool — audit writes are async and lower volume than FHIR writes.
|
||||
# ---------------------------------------------------------------------------
|
||||
pgbouncer-audit:
|
||||
image: bitnami/pgbouncer:1.22.1
|
||||
container_name: bd-pgbouncer-audit
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- backend-audit
|
||||
environment:
|
||||
POSTGRESQL_HOST: postgres-audit
|
||||
POSTGRESQL_PORT: "5432"
|
||||
POSTGRESQL_DATABASE: ${AUDIT_DB_NAME}
|
||||
POSTGRESQL_USERNAME: ${AUDIT_DB_WRITER_USER}
|
||||
POSTGRESQL_PASSWORD: ${AUDIT_DB_WRITER_PASSWORD}
|
||||
PGBOUNCER_DATABASE: ${AUDIT_DB_NAME}
|
||||
PGBOUNCER_POOL_MODE: session
|
||||
PGBOUNCER_MAX_CLIENT_CONN: "50"
|
||||
PGBOUNCER_DEFAULT_POOL_SIZE: "10"
|
||||
PGBOUNCER_MIN_POOL_SIZE: "2"
|
||||
PGBOUNCER_RESERVE_POOL_SIZE: "2"
|
||||
PGBOUNCER_SERVER_IDLE_TIMEOUT: "600"
|
||||
PGBOUNCER_LOG_CONNECTIONS: "1"
|
||||
PGBOUNCER_LOG_DISCONNECTIONS: "1"
|
||||
PGBOUNCER_LOG_POOLER_ERRORS: "1"
|
||||
PGBOUNCER_VERBOSE: "0"
|
||||
depends_on:
|
||||
postgres-audit:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -h localhost -p 5432 -U ${AUDIT_DB_WRITER_USER}"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# hapi
|
||||
# BD FHIR National HAPI overlay.
|
||||
# Stateless — no local state, all state in PostgreSQL.
|
||||
# Scale with: docker-compose up -d --scale hapi=3
|
||||
#
|
||||
# REPLICA SCALING NOTE:
|
||||
# When scaling to N replicas, ensure:
|
||||
# 1. pgbouncer-fhir pool_size >= N * HAPI_DB_POOL_SIZE (default: N*5)
|
||||
# 2. pgbouncer-audit pool_size >= N * HAPI_AUDIT_POOL_SIZE (default: N*2)
|
||||
# 3. nginx upstream hapi has all N replica IPs or uses DNS round-robin
|
||||
# (see nginx.conf — uses Docker DNS service name which auto-discovers
|
||||
# all replicas when using --scale)
|
||||
# ---------------------------------------------------------------------------
|
||||
hapi:
|
||||
image: ${HAPI_IMAGE}
|
||||
# container_name intentionally omitted — docker-compose appends _1, _2, _3
|
||||
# when scaling. A fixed container_name breaks --scale.
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- frontend
|
||||
- backend-fhir
|
||||
- backend-audit
|
||||
volumes:
|
||||
- hapi-logs:/app/logs
|
||||
environment:
|
||||
# Spring
|
||||
SPRING_PROFILES_ACTIVE: prod
|
||||
|
||||
# FHIR datasource — routes through pgBouncer
|
||||
SPRING_DATASOURCE_URL: jdbc:postgresql://pgbouncer-fhir:5432/${FHIR_DB_NAME}
|
||||
SPRING_DATASOURCE_USERNAME: ${FHIR_DB_APP_USER}
|
||||
SPRING_DATASOURCE_PASSWORD: ${FHIR_DB_APP_PASSWORD}
|
||||
SPRING_DATASOURCE_DRIVER_CLASS_NAME: org.postgresql.Driver
|
||||
|
||||
# HikariCP — FHIR datasource pool
|
||||
# 5 connections per replica × N replicas = N*5 total PostgreSQL connections
|
||||
# At 3 replicas: 15 connections → fits in pgBouncer pool_size=20
|
||||
SPRING_DATASOURCE_HIKARI_MAXIMUM_POOL_SIZE: "5"
|
||||
SPRING_DATASOURCE_HIKARI_MINIMUM_IDLE: "2"
|
||||
SPRING_DATASOURCE_HIKARI_CONNECTION_TIMEOUT: "30000"
|
||||
SPRING_DATASOURCE_HIKARI_IDLE_TIMEOUT: "600000"
|
||||
SPRING_DATASOURCE_HIKARI_MAX_LIFETIME: "1800000"
|
||||
SPRING_DATASOURCE_HIKARI_POOL_NAME: fhir-pool
|
||||
# pgBouncer session mode: prepared statements work.
|
||||
# Keep this false for compatibility — pgBouncer manages statement lifecycle.
# NOTE(review): "PREPARESTATEMENT" is not a recognized pgJDBC data-source
# property; the usual knob for disabling server-side prepared statements is
# prepareThreshold (e.g. ...DATA_SOURCE_PROPERTIES_PREPARE_THRESHOLD: "0").
# Confirm the property name below actually takes effect at runtime.
|
||||
SPRING_DATASOURCE_HIKARI_DATA_SOURCE_PROPERTIES_PREPARESTATEMENT: "false"
|
||||
|
||||
# Audit datasource — INSERT-only, routes through pgBouncer
|
||||
AUDIT_DATASOURCE_URL: jdbc:postgresql://pgbouncer-audit:5432/${AUDIT_DB_NAME}
|
||||
AUDIT_DATASOURCE_USERNAME: ${AUDIT_DB_WRITER_USER}
|
||||
AUDIT_DATASOURCE_PASSWORD: ${AUDIT_DB_WRITER_PASSWORD}
|
||||
|
||||
# HikariCP — audit datasource pool
|
||||
# Smaller pool — audit writes are async
|
||||
AUDIT_DATASOURCE_HIKARI_MAXIMUM_POOL_SIZE: "2"
|
||||
AUDIT_DATASOURCE_HIKARI_MINIMUM_IDLE: "1"
|
||||
AUDIT_DATASOURCE_HIKARI_POOL_NAME: audit-pool
|
||||
|
||||
# Flyway — FHIR schema migrations
|
||||
SPRING_FLYWAY_URL: jdbc:postgresql://postgres-fhir:5432/${FHIR_DB_NAME}
|
||||
SPRING_FLYWAY_USER: ${FHIR_DB_SUPERUSER}
|
||||
SPRING_FLYWAY_PASSWORD: ${FHIR_DB_SUPERUSER_PASSWORD}
|
||||
# Flyway connects directly to PostgreSQL (bypassing pgBouncer) for
|
||||
# migrations — pgBouncer session mode is incompatible with DDL in
|
||||
# some edge cases. Direct connection is safer for schema changes.
|
||||
|
||||
# Flyway — Audit schema migrations (separate datasource)
|
||||
AUDIT_FLYWAY_URL: jdbc:postgresql://postgres-audit:5432/${AUDIT_DB_NAME}
|
||||
AUDIT_FLYWAY_USER: ${AUDIT_DB_SUPERUSER}
|
||||
AUDIT_FLYWAY_PASSWORD: ${AUDIT_DB_SUPERUSER_PASSWORD}
|
||||
|
||||
# HAPI FHIR
|
||||
HAPI_FHIR_SERVER_ADDRESS: https://fhir.dghs.gov.bd/fhir
|
||||
HAPI_FHIR_FHIR_VERSION: R4
|
||||
|
||||
# OCL terminology service
|
||||
HAPI_OCL_BASE_URL: https://tr.ocl.dghs.gov.bd/api/fhir
|
||||
HAPI_OCL_TIMEOUT_SECONDS: "10"
|
||||
HAPI_OCL_RETRY_ATTEMPTS: "2"
|
||||
|
||||
# Cluster validator
|
||||
HAPI_CLUSTER_VALIDATOR_URL: https://icd11.dghs.gov.bd/cluster/validate
|
||||
HAPI_CLUSTER_VALIDATOR_TIMEOUT_SECONDS: "10"
|
||||
|
||||
# Keycloak
|
||||
KEYCLOAK_ISSUER: https://auth.dghs.gov.bd/realms/hris
|
||||
KEYCLOAK_JWKS_URL: https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/certs
|
||||
KEYCLOAK_REQUIRED_ROLE: mci-api
|
||||
KEYCLOAK_ADMIN_ROLE: fhir-admin
|
||||
# JWKS cache: 1 hour TTL, re-fetch on unknown kid
|
||||
KEYCLOAK_JWKS_CACHE_TTL_SECONDS: "3600"
|
||||
|
||||
# BD Core IG
|
||||
HAPI_IG_PACKAGE_CLASSPATH: classpath:packages/bd.gov.dghs.core-0.2.1.tgz
|
||||
HAPI_IG_VERSION: 0.2.1
|
||||
|
||||
# Terminology cache
|
||||
HAPI_TERMINOLOGY_CACHE_TTL_SECONDS: "86400"
|
||||
|
||||
# JVM options — override defaults from Dockerfile
|
||||
JAVA_OPTS: >-
|
||||
-XX:+UseContainerSupport
|
||||
-XX:MaxRAMPercentage=75.0
|
||||
-XX:+ExitOnOutOfMemoryError
|
||||
-XX:+HeapDumpOnOutOfMemoryError
|
||||
-XX:HeapDumpPath=/tmp/heapdump.hprof
|
||||
-Djava.security.egd=file:/dev/urandom
|
||||
-Dfile.encoding=UTF-8
|
||||
-Duser.timezone=UTC
|
||||
-Dspring.profiles.active=prod
|
||||
|
||||
# Logging
|
||||
LOGGING_LEVEL_ROOT: WARN
|
||||
LOGGING_LEVEL_BD_GOV_DGHS: INFO
|
||||
LOGGING_LEVEL_CA_UHN_HAPI: WARN
|
||||
LOGGING_LEVEL_ORG_SPRINGFRAMEWORK: WARN
|
||||
# Set to DEBUG temporarily during initial deployment verification,
|
||||
# then revert to WARN. DEBUG logs contain full resource payloads.
|
||||
LOGGING_LEVEL_BD_GOV_DGHS_FHIR_INTERCEPTOR: INFO
|
||||
LOGGING_LEVEL_BD_GOV_DGHS_FHIR_TERMINOLOGY: INFO
|
||||
LOGGING_LEVEL_BD_GOV_DGHS_FHIR_VALIDATOR: INFO
|
||||
|
||||
depends_on:
|
||||
pgbouncer-fhir:
|
||||
condition: service_healthy
|
||||
pgbouncer-audit:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL",
|
||||
"curl --fail --silent --show-error http://localhost:8080/actuator/health/liveness || exit 1"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
start_period: 120s
|
||||
retries: 3
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 4G
|
||||
reservations:
|
||||
memory: 2G
|
||||
# PHASE 1: replicas=1
|
||||
# PHASE 2: replicas=3 (update here or use --scale flag)
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
delay: 10s
|
||||
max_attempts: 3
|
||||
window: 120s
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# nginx
|
||||
# Reverse proxy with TLS termination.
|
||||
# Certificates managed by centralised nginx proxy — see Challenge E resolution.
|
||||
# This nginx handles: upstream routing, rate limiting, request ID injection.
|
||||
# ---------------------------------------------------------------------------
|
||||
nginx:
|
||||
image: nginx:1.25-alpine
|
||||
container_name: bd-nginx
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- frontend
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
volumes:
|
||||
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
|
||||
- ./nginx/conf.d:/etc/nginx/conf.d:ro
|
||||
# TLS certificates — provisioned by centralised nginx proxy / government CA
|
||||
# Mount path must match ssl_certificate directives in nginx.conf
|
||||
- ${TLS_CERT_PATH}:/etc/nginx/certs/server.crt:ro
|
||||
- ${TLS_KEY_PATH}:/etc/nginx/certs/server.key:ro
|
||||
depends_on:
|
||||
hapi:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "nginx -t && curl --fail --silent http://localhost/health || exit 1"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
|
||||
# =============================================================================
|
||||
# NOTES ON WHAT IS NOT IN THIS FILE
|
||||
# =============================================================================
|
||||
#
|
||||
# ELK STACK (Elasticsearch, Logstash, Kibana):
|
||||
# Not included. At pilot phase, structured JSON logs written to
|
||||
# hapi-logs volume are sufficient. Ship logs to ELK via Filebeat
|
||||
# agent running on the host (outside Docker) to avoid coupling
|
||||
# the FHIR server uptime to the ELK stack uptime.
|
||||
# Add Filebeat config in ops/ when ELK is provisioned.
|
||||
#
|
||||
# KEYCLOAK:
|
||||
# Not included. Keycloak is an existing national service at
|
||||
# https://auth.dghs.gov.bd — not deployed here.
|
||||
#
|
||||
# OCL TERMINOLOGY SERVER:
|
||||
# Not included. External service at https://tr.ocl.dghs.gov.bd — not deployed here.
|
||||
#
|
||||
# CLUSTER VALIDATOR:
|
||||
# Not included. External service at https://icd11.dghs.gov.bd — not deployed here.
|
||||
172
env.example
Normal file
172
env.example
Normal file
@@ -0,0 +1,172 @@
|
||||
# =============================================================================
|
||||
# BD FHIR National — Environment Variables
|
||||
#
|
||||
# INSTRUCTIONS:
|
||||
# cp .env.example .env
|
||||
# Fill in all values marked <CHANGE_ME>
|
||||
# chmod 600 .env
|
||||
# NEVER commit .env to version control
|
||||
# Store the filled .env in your secrets vault
|
||||
#
|
||||
# PASSWORD REQUIREMENTS:
|
||||
# All passwords minimum 32 characters.
|
||||
# Generate with: openssl rand -base64 32
|
||||
# Each password must be unique — never reuse across services.
|
||||
# Rotate every 90 days per DGHS security policy.
|
||||
# =============================================================================
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# HAPI Docker Image
|
||||
# Format: your-registry.dghs.gov.bd/bd-fhir-hapi:{version}
|
||||
# Update this value to deploy a new image version.
|
||||
# -----------------------------------------------------------------------------
|
||||
HAPI_IMAGE=your-registry.dghs.gov.bd/bd-fhir-hapi:1.0.0
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# FHIR Database (postgres-fhir)
|
||||
#
|
||||
# SUPERUSER: used by Flyway migrations only.
|
||||
# Connects directly to postgres-fhir (bypasses pgBouncer).
|
||||
# Must have CREATE TABLE, CREATE INDEX, CREATE SEQUENCE privileges.
|
||||
#
|
||||
# APP USER: used by HAPI JPA at runtime.
|
||||
# Connects via pgBouncer (session mode).
|
||||
# Granted SELECT, INSERT, UPDATE, DELETE on all HAPI JPA tables.
|
||||
# Created by postgres/fhir/init.sh on first container start.
|
||||
# -----------------------------------------------------------------------------
|
||||
FHIR_DB_NAME=fhirdb
|
||||
FHIR_DB_SUPERUSER=postgres
|
||||
FHIR_DB_SUPERUSER_PASSWORD=<CHANGE_ME>
|
||||
|
||||
FHIR_DB_APP_USER=hapi_app
|
||||
FHIR_DB_APP_PASSWORD=<CHANGE_ME>
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Audit Database (postgres-audit)
|
||||
#
|
||||
# SUPERUSER: used by Flyway audit migrations only.
|
||||
# Connects directly to postgres-audit (bypasses pgBouncer).
|
||||
# Must have CREATE TABLE, CREATE SCHEMA, CREATE FUNCTION privileges.
|
||||
#
|
||||
# WRITER: used by HAPI audit datasource at runtime.
|
||||
# Connects via pgBouncer (session mode).
|
||||
# INSERT only on audit schema — no SELECT, UPDATE, DELETE, TRUNCATE.
|
||||
# Created by postgres/audit/init.sh on first container start.
|
||||
#
|
||||
# MAINTAINER: used by monthly partition maintenance cron job only.
|
||||
# EXECUTE on audit.create_next_month_partitions() function only.
|
||||
# Never used by the HAPI JVM.
|
||||
# Created by postgres/audit/init.sh on first container start.
|
||||
# -----------------------------------------------------------------------------
|
||||
AUDIT_DB_NAME=auditdb
|
||||
AUDIT_DB_SUPERUSER=postgres
|
||||
AUDIT_DB_SUPERUSER_PASSWORD=<CHANGE_ME>
|
||||
|
||||
AUDIT_DB_WRITER_USER=audit_writer_login
|
||||
AUDIT_DB_WRITER_PASSWORD=<CHANGE_ME>
|
||||
|
||||
AUDIT_DB_MAINTAINER_USER=audit_maintainer_login
|
||||
AUDIT_DB_MAINTAINER_PASSWORD=<CHANGE_ME>
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Flyway — FHIR schema migrations
|
||||
# Connects DIRECTLY to postgres-fhir (not pgBouncer) using superuser.
|
||||
# URL must point to the postgres-fhir container, not pgbouncer-fhir.
|
||||
# -----------------------------------------------------------------------------
|
||||
SPRING_FLYWAY_URL=jdbc:postgresql://postgres-fhir:5432/fhirdb
|
||||
SPRING_FLYWAY_USER=<CHANGE_ME_same_as_FHIR_DB_SUPERUSER>
|
||||
SPRING_FLYWAY_PASSWORD=<CHANGE_ME_same_as_FHIR_DB_SUPERUSER_PASSWORD>
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Flyway — Audit schema migrations
|
||||
# Connects DIRECTLY to postgres-audit (not pgBouncer) using superuser.
|
||||
# -----------------------------------------------------------------------------
|
||||
AUDIT_FLYWAY_URL=jdbc:postgresql://postgres-audit:5432/auditdb
|
||||
AUDIT_FLYWAY_USER=<CHANGE_ME_same_as_AUDIT_DB_SUPERUSER>
|
||||
AUDIT_FLYWAY_PASSWORD=<CHANGE_ME_same_as_AUDIT_DB_SUPERUSER_PASSWORD>
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# HAPI FHIR datasource — runtime connection via pgBouncer
|
||||
# -----------------------------------------------------------------------------
|
||||
SPRING_DATASOURCE_URL=jdbc:postgresql://pgbouncer-fhir:5432/fhirdb
|
||||
SPRING_DATASOURCE_USERNAME=<CHANGE_ME_same_as_FHIR_DB_APP_USER>
|
||||
SPRING_DATASOURCE_PASSWORD=<CHANGE_ME_same_as_FHIR_DB_APP_PASSWORD>
|
||||
|
||||
# HikariCP pool — FHIR datasource
|
||||
# 5 connections per replica. At 3 replicas: 15 total PostgreSQL connections.
|
||||
# pgBouncer pool_size=20 — 5 headroom. Do not exceed without updating pgBouncer.
|
||||
SPRING_DATASOURCE_HIKARI_MAXIMUM_POOL_SIZE=5
|
||||
SPRING_DATASOURCE_HIKARI_MINIMUM_IDLE=2
|
||||
SPRING_DATASOURCE_HIKARI_POOL_NAME=fhir-pool
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Audit datasource — runtime connection via pgBouncer (INSERT-only)
|
||||
# -----------------------------------------------------------------------------
|
||||
AUDIT_DATASOURCE_URL=jdbc:postgresql://pgbouncer-audit:5432/auditdb
|
||||
AUDIT_DATASOURCE_USERNAME=<CHANGE_ME_same_as_AUDIT_DB_WRITER_USER>
|
||||
AUDIT_DATASOURCE_PASSWORD=<CHANGE_ME_same_as_AUDIT_DB_WRITER_PASSWORD>
|
||||
|
||||
# HikariCP pool — audit datasource
|
||||
# Small pool — audit writes are async and low-volume.
|
||||
AUDIT_DATASOURCE_HIKARI_MAXIMUM_POOL_SIZE=2
|
||||
AUDIT_DATASOURCE_HIKARI_MINIMUM_IDLE=1
|
||||
AUDIT_DATASOURCE_HIKARI_POOL_NAME=audit-pool
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# HAPI FHIR server
|
||||
# -----------------------------------------------------------------------------
|
||||
HAPI_FHIR_SERVER_ADDRESS=https://fhir.dghs.gov.bd/fhir
|
||||
HAPI_FHIR_FHIR_VERSION=R4
|
||||
|
||||
# BD Core IG — must match the .tgz filename in src/main/resources/packages/
|
||||
HAPI_IG_PACKAGE_CLASSPATH=classpath:packages/bd.gov.dghs.core-0.2.1.tgz
|
||||
HAPI_IG_VERSION=0.2.1
|
||||
|
||||
# Terminology cache TTL — 24 hours in seconds.
|
||||
# Flush via DELETE /admin/terminology/cache after ICD-11 version upgrade.
|
||||
HAPI_TERMINOLOGY_CACHE_TTL_SECONDS=86400
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# OCL — national terminology server
|
||||
# -----------------------------------------------------------------------------
|
||||
HAPI_OCL_BASE_URL=https://tr.ocl.dghs.gov.bd/api/fhir
|
||||
HAPI_OCL_TIMEOUT_SECONDS=10
|
||||
HAPI_OCL_RETRY_ATTEMPTS=2
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Cluster validator middleware
|
||||
# -----------------------------------------------------------------------------
|
||||
HAPI_CLUSTER_VALIDATOR_URL=https://icd11.dghs.gov.bd/cluster/validate
|
||||
HAPI_CLUSTER_VALIDATOR_TIMEOUT_SECONDS=10
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Keycloak — national identity provider
|
||||
# Realm: hris
|
||||
# Do not change these URLs unless the Keycloak deployment changes.
|
||||
# -----------------------------------------------------------------------------
|
||||
KEYCLOAK_ISSUER=https://auth.dghs.gov.bd/realms/hris
|
||||
KEYCLOAK_JWKS_URL=https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/certs
|
||||
KEYCLOAK_REQUIRED_ROLE=mci-api
|
||||
KEYCLOAK_ADMIN_ROLE=fhir-admin
|
||||
|
||||
# JWKS cache TTL in seconds (1 hour).
|
||||
# Keys are re-fetched immediately on unknown kid regardless of TTL.
|
||||
KEYCLOAK_JWKS_CACHE_TTL_SECONDS=3600
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Spring Boot
|
||||
# -----------------------------------------------------------------------------
|
||||
SPRING_PROFILES_ACTIVE=prod
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Logging
|
||||
# Set individual levels to DEBUG temporarily during initial deployment only.
|
||||
# Never leave DEBUG enabled in production — FHIR resources contain patient data.
|
||||
# -----------------------------------------------------------------------------
|
||||
LOGGING_LEVEL_ROOT=WARN
|
||||
LOGGING_LEVEL_BD_GOV_DGHS=INFO
|
||||
LOGGING_LEVEL_BD_GOV_DGHS_FHIR_INTERCEPTOR=INFO
|
||||
LOGGING_LEVEL_BD_GOV_DGHS_FHIR_TERMINOLOGY=INFO
|
||||
LOGGING_LEVEL_BD_GOV_DGHS_FHIR_VALIDATOR=INFO
|
||||
LOGGING_LEVEL_CA_UHN_HAPI=WARN
|
||||
LOGGING_LEVEL_ORG_SPRINGFRAMEWORK=WARN
|
||||
206
hapi-overlay/Dockerfile
Normal file
206
hapi-overlay/Dockerfile
Normal file
@@ -0,0 +1,206 @@
|
||||
# =============================================================================
|
||||
# BD FHIR National — HAPI Overlay Dockerfile
|
||||
# Multi-stage build: Maven builder + lean JRE runtime
|
||||
#
|
||||
# BUILD (CI machine):
|
||||
# docker build \
|
||||
# --build-arg IG_PACKAGE=bd.gov.dghs.core-0.2.1.tgz \
|
||||
# --build-arg BUILD_VERSION=1.0.0 \
|
||||
# --build-arg GIT_COMMIT=$(git rev-parse --short HEAD) \
|
||||
# -t your-registry.dghs.gov.bd/bd-fhir-hapi:1.0.0 \
|
||||
# -f hapi-overlay/Dockerfile \
|
||||
# .
|
||||
#
|
||||
# PUSH:
|
||||
# docker push your-registry.dghs.gov.bd/bd-fhir-hapi:1.0.0
|
||||
#
|
||||
# The production server never builds — it only pulls.
|
||||
# The IG package.tgz must be present at:
|
||||
# hapi-overlay/src/main/resources/packages/${IG_PACKAGE}
|
||||
# before the build runs. CI pipeline is responsible for placing it there.
|
||||
#
|
||||
# IG VERSION UPGRADE:
|
||||
# 1. Drop new package.tgz into src/main/resources/packages/
|
||||
# 2. Update IG_PACKAGE build arg to new filename
|
||||
# 3. Rebuild and push new image tag
|
||||
# 4. Redeploy via docker-compose pull + up
|
||||
# 5. Call cache flush endpoint (see ops/version-upgrade-integration.md)
|
||||
# =============================================================================
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# STAGE 1: Builder
|
||||
# Uses full Maven + JDK image. Result discarded after JAR is built.
|
||||
# Only the fat JAR is carried forward to the runtime stage.
|
||||
# -----------------------------------------------------------------------------
|
||||
FROM maven:3.9.6-eclipse-temurin-17 AS builder
|
||||
|
||||
LABEL stage=builder
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
# Copy parent POM first — allows Docker layer caching to skip dependency
|
||||
# download if only source code changes (not POM dependencies).
|
||||
COPY pom.xml ./pom.xml
|
||||
COPY hapi-overlay/pom.xml ./hapi-overlay/pom.xml
|
||||
|
||||
# Download all dependencies into the Maven local repository cache layer.
|
||||
# This layer is invalidated only when a POM file changes.
|
||||
# On a CI machine with layer caching enabled, this saves 3-5 minutes
|
||||
# per build when only Java source changes.
|
||||
RUN mvn dependency:go-offline \
|
||||
--batch-mode \
|
||||
--no-transfer-progress \
|
||||
-pl hapi-overlay \
|
||||
-am
|
||||
|
||||
# Now copy source — this layer changes on every code commit.
|
||||
COPY hapi-overlay/src ./hapi-overlay/src
|
||||
|
||||
# Build fat JAR. Skip tests here — tests run in a separate CI stage
|
||||
# against TestContainers before the Docker build is invoked.
|
||||
# If your CI runs tests inside Docker, remove -DskipTests.
|
||||
RUN mvn package \
|
||||
--batch-mode \
|
||||
--no-transfer-progress \
|
||||
-pl hapi-overlay \
|
||||
-am \
|
||||
-DskipTests \
|
||||
-Dspring-boot.repackage.skip=false
|
||||
|
||||
# Verify the fat JAR was produced with the expected name
|
||||
RUN ls -lh /build/hapi-overlay/target/bd-fhir-hapi.jar && \
|
||||
echo "JAR size: $(du -sh /build/hapi-overlay/target/bd-fhir-hapi.jar | cut -f1)"
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# STAGE 2: Runtime
|
||||
# Lean JRE image — no JDK, no Maven, no build tools.
|
||||
# Attack surface reduced. Image size ~300MB vs ~800MB for builder.
|
||||
# -----------------------------------------------------------------------------
|
||||
FROM eclipse-temurin:17-jre-jammy AS runtime
|
||||
|
||||
# Build arguments — embedded in image labels for traceability.
|
||||
# Every production image must be traceable to a specific git commit
|
||||
# and IG version. If you cannot answer "what IG version is running",
|
||||
# you cannot validate your validation engine.
|
||||
ARG BUILD_VERSION=unknown
|
||||
ARG GIT_COMMIT=unknown
|
||||
ARG IG_PACKAGE=unknown
|
||||
ARG BUILD_TIMESTAMP
|
||||
# Set default build timestamp if not provided
|
||||
RUN if [ -z "${BUILD_TIMESTAMP}" ]; then BUILD_TIMESTAMP=$(date -u +%Y-%m-%dT%H:%M:%SZ); fi
|
||||
|
||||
LABEL org.opencontainers.image.title="BD FHIR National HAPI Server" \
|
||||
org.opencontainers.image.description="National FHIR R4 repository and validation engine, Bangladesh" \
|
||||
org.opencontainers.image.vendor="DGHS/MoHFW Bangladesh" \
|
||||
org.opencontainers.image.version="${BUILD_VERSION}" \
|
||||
org.opencontainers.image.revision="${GIT_COMMIT}" \
|
||||
bd.gov.dghs.ig.version="${IG_PACKAGE}" \
|
||||
bd.gov.dghs.fhir.version="R4" \
|
||||
bd.gov.dghs.hapi.version="7.2.0"
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# SYSTEM SETUP
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
# Create non-root user. Running as root inside a container is a security
|
||||
# vulnerability — if the JVM is exploited, the attacker gets root on the host
|
||||
# if the container runs privileged or has volume mounts.
|
||||
RUN groupadd --gid 10001 hapi && \
|
||||
useradd --uid 10001 --gid hapi --shell /bin/false --no-create-home hapi
|
||||
|
||||
# Install curl for Docker health checks.
|
||||
# tini: init process to reap zombie processes and forward signals correctly.
|
||||
# Without tini, SIGTERM from docker stop is not forwarded to the JVM and
|
||||
# the container is killed after the stop timeout (ungraceful shutdown).
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
curl \
|
||||
tini \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Application directory
|
||||
WORKDIR /app
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# COPY ARTIFACTS FROM BUILDER
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
COPY --from=builder /build/hapi-overlay/target/bd-fhir-hapi.jar /app/bd-fhir-hapi.jar
|
||||
|
||||
# Set correct ownership — hapi user must be able to read the JAR
|
||||
RUN chown hapi:hapi /app/bd-fhir-hapi.jar
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# RUNTIME CONFIGURATION
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
# Switch to non-root user before any further commands
|
||||
USER hapi
|
||||
|
||||
# JVM tuning arguments.
|
||||
# These are defaults — override via JAVA_OPTS environment variable
|
||||
# in docker-compose.yml for environment-specific tuning.
|
||||
#
|
||||
# -XX:+UseContainerSupport
|
||||
# Enables JVM to read CPU/memory limits from cgroup (Docker constraints).
|
||||
# Without this, JVM reads host machine memory and over-allocates heap.
|
||||
# Available since Java 8u191 — always present in temurin:17.
|
||||
#
|
||||
# -XX:MaxRAMPercentage=75.0
|
||||
# Heap = 75% of container memory limit.
|
||||
# For a 2GB container: heap = 1.5GB. Remaining 512MB for non-heap
|
||||
# (Metaspace, thread stacks, code cache, direct buffers).
|
||||
# HAPI 7.x with full IG loaded needs ~512MB heap minimum.
|
||||
# Recommended container memory: 2GB minimum, 4GB for production.
|
||||
#
|
||||
# -XX:+ExitOnOutOfMemoryError
|
||||
# Kill the JVM immediately on OOM instead of limping along in a broken
|
||||
# state. Docker will restart the container. Prefer clean restart over
|
||||
# degraded service.
|
||||
#
|
||||
# -Djava.security.egd=file:/dev/urandom
|
||||
# Prevents SecureRandom from blocking on /dev/random in containerised
|
||||
# environments where hardware entropy is limited.
|
||||
# Critical for JWT validation performance — Nimbus JOSE uses SecureRandom.
|
||||
ENV JAVA_OPTS="\
|
||||
-XX:+UseContainerSupport \
|
||||
-XX:MaxRAMPercentage=75.0 \
|
||||
-XX:+ExitOnOutOfMemoryError \
|
||||
-XX:+HeapDumpOnOutOfMemoryError \
|
||||
-XX:HeapDumpPath=/tmp/heapdump.hprof \
|
||||
-Djava.security.egd=file:/dev/urandom \
|
||||
-Dfile.encoding=UTF-8 \
|
||||
-Duser.timezone=UTC"
|
||||
|
||||
# Spring profile — overridable via environment variable in docker-compose
|
||||
ENV SPRING_PROFILES_ACTIVE=prod
|
||||
|
||||
# FHIR server base URL — must match nginx configuration
|
||||
ENV HAPI_FHIR_SERVER_ADDRESS=https://fhir.dghs.gov.bd/fhir
|
||||
|
||||
# Expose HTTP port. nginx terminates TLS and proxies to this port.
|
||||
# Do NOT expose this port directly — it must only be reachable via nginx.
|
||||
EXPOSE 8080
|
||||
|
||||
# Health check — used by Docker and docker-compose depends_on condition.
|
||||
# /actuator/health returns 200 when application is fully started and
|
||||
# all health indicators pass (including the custom AuditDataSourceHealthIndicator).
|
||||
# --fail-with-body: return non-zero exit on HTTP error responses.
|
||||
# start_period: allow 120s for startup (IG loading + Flyway migrations).
|
||||
HEALTHCHECK \
|
||||
--interval=30s \
|
||||
--timeout=10s \
|
||||
--start-period=120s \
|
||||
--retries=3 \
|
||||
CMD curl --fail --silent --show-error \
|
||||
http://localhost:8080/actuator/health/liveness || exit 1
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# ENTRYPOINT
|
||||
# tini as PID 1 → JVM as child process.
|
||||
# tini handles SIGTERM correctly: forwards to JVM, waits for graceful
|
||||
# shutdown, then exits. Without tini, docker stop sends SIGTERM to PID 1
|
||||
# (the JVM) but the JVM may ignore it depending on signal handling setup.
|
||||
# -----------------------------------------------------------------------------
|
||||
ENTRYPOINT ["/usr/bin/tini", "--"]
|
||||
CMD ["sh", "-c", "exec java ${JAVA_OPTS} -jar /app/bd-fhir-hapi.jar"]
|
||||
318
hapi-overlay/pom.xml
Normal file
318
hapi-overlay/pom.xml
Normal file
@@ -0,0 +1,318 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
|
||||
https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<!-- =========================================================
|
||||
BD Core FHIR National Repository — HAPI Overlay Module
|
||||
This module produces the fat JAR that runs in the container.
|
||||
All runtime dependencies declared here.
|
||||
========================================================= -->
|
||||
|
||||
<parent>
|
||||
<groupId>bd.gov.dghs</groupId>
|
||||
<artifactId>bd-fhir-national</artifactId>
|
||||
<version>1.0.0-SNAPSHOT</version>
|
||||
<relativePath>../pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
<artifactId>hapi-overlay</artifactId>
|
||||
<packaging>jar</packaging>
|
||||
<name>BD FHIR National — HAPI Overlay</name>
|
||||
<description>
|
||||
Custom HAPI FHIR overlay for the BD national FHIR repository.
|
||||
Includes: Keycloak JWT interceptor, BD Core IG validation chain,
|
||||
OCL terminology integration, cluster expression validator,
|
||||
audit event emitter, and rejected submission sink.
|
||||
</description>
|
||||
|
||||
<dependencies>
|
||||
|
||||
<!-- =======================================================
|
||||
HAPI FHIR CORE — JPA server stack
|
||||
Versions managed by hapi-fhir-bom in parent POM.
|
||||
======================================================= -->
|
||||
|
||||
<!-- JPA server starter — brings in Spring Boot web, JPA,
|
||||
Hibernate, Jackson, and HAPI servlet infrastructure -->
|
||||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-jpaserver-starter</artifactId>
|
||||
<!-- Version from BOM. Do NOT pin version here. -->
|
||||
</dependency>
|
||||
|
||||
<!-- FHIR R4 model classes — Patient, Condition, AuditEvent, etc. -->
|
||||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-structures-r4</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- Validation support framework — IValidationSupport chain -->
|
||||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-validation</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- Validation resources — built-in FHIR R4 profiles and
|
||||
code system content (LOINC, SNOMED stubs, etc.) -->
|
||||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-validation-resources-r4</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- NPM package support — loads BD Core IG package.tgz
|
||||
via NpmPackageValidationSupport -->
|
||||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-npm-packages</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- Remote terminology service — base class for our custom
|
||||
BdTerminologyValidationSupport. We extend this to force
|
||||
$validate-code and suppress $expand. -->
|
||||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-terminology</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- IInstanceValidator — used by FhirValidator to run
|
||||
profile validation on submitted resources -->
|
||||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-validation-resources-r4</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- =======================================================
|
||||
SPRING BOOT STARTERS
|
||||
Versions managed by spring-boot-dependencies in parent.
|
||||
======================================================= -->
|
||||
|
||||
<!-- Web MVC — embedded Tomcat, DispatcherServlet -->
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-web</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- JPA / Hibernate — HAPI JPA persistence layer -->
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-data-jpa</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- Actuator — /actuator/health, /actuator/info, /actuator/metrics.
|
||||
Health endpoints used by load balancer liveness/readiness probes.
|
||||
Custom AuditDataSourceHealthIndicator registered here. -->
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-actuator</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- Validation (Bean Validation / Hibernate Validator) —
|
||||
used for @Valid on REST controller inputs -->
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-validation</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- =======================================================
|
||||
DATABASE
|
||||
======================================================= -->
|
||||
|
||||
<!-- PostgreSQL JDBC driver — runtime only, not needed at compile -->
|
||||
<dependency>
|
||||
<groupId>org.postgresql</groupId>
|
||||
<artifactId>postgresql</artifactId>
|
||||
<scope>runtime</scope>
|
||||
</dependency>
|
||||
|
||||
<!-- Flyway core — schema migration engine.
|
||||
Runs V1__hapi_schema.sql and V2__audit_schema.sql on startup
|
||||
before HAPI JPA initialises. -->
|
||||
<dependency>
|
||||
<groupId>org.flywaydb</groupId>
|
||||
<artifactId>flyway-core</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- Flyway PostgreSQL dialect — required for Flyway 10+.
|
||||
Without this artifact, Flyway silently skips migrations
|
||||
against PostgreSQL datasources. -->
|
||||
<dependency>
|
||||
<groupId>org.flywaydb</groupId>
|
||||
<artifactId>flyway-database-postgresql</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- HikariCP — connection pool.
|
||||
Spring Boot auto-configures HikariCP when it is on classpath.
|
||||
Explicit declaration ensures version alignment with parent BOM. -->
|
||||
<dependency>
|
||||
<groupId>com.zaxxer</groupId>
|
||||
<artifactId>HikariCP</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- =======================================================
|
||||
SECURITY — JWT VALIDATION
|
||||
======================================================= -->
|
||||
|
||||
<!-- Nimbus JOSE+JWT — JWT parsing, signature verification,
|
||||
and JWKS remote key set with cache.
|
||||
Used by KeycloakJwtInterceptor.
|
||||
RemoteJWKSet provides kid-based cache invalidation:
|
||||
keys cached 1 hour, re-fetched on unknown kid. -->
|
||||
<dependency>
|
||||
<groupId>com.nimbusds</groupId>
|
||||
<artifactId>nimbus-jose-jwt</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- =======================================================
|
||||
HTTP CLIENT — OCL and cluster validator calls
|
||||
======================================================= -->
|
||||
|
||||
<!-- Apache HttpClient 5 — used by BdTerminologyValidationSupport
|
||||
for OCL $validate-code calls, and ClusterExpressionValidator
|
||||
for https://icd11.dghs.gov.bd/cluster/validate calls.
|
||||
Separate from the HttpClient that HAPI uses internally
|
||||
(HAPI uses its own managed instance). -->
|
||||
<dependency>
|
||||
<groupId>org.apache.httpcomponents.client5</groupId>
|
||||
<artifactId>httpclient5</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- =======================================================
|
||||
OBSERVABILITY
|
||||
======================================================= -->
|
||||
|
||||
<!-- Micrometer Prometheus registry — exposes /actuator/prometheus
|
||||
for Prometheus scraping. Optional but included from day one
|
||||
for national-scale observability readiness. -->
|
||||
<dependency>
|
||||
<groupId>io.micrometer</groupId>
|
||||
<artifactId>micrometer-registry-prometheus</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- =======================================================
|
||||
UTILITIES
|
||||
======================================================= -->
|
||||
|
||||
<!-- Jackson — JSON serialisation for audit log payloads,
|
||||
OCL API responses, cluster validator responses.
|
||||
Managed by Spring Boot BOM. -->
|
||||
<dependency>
|
||||
<groupId>com.fasterxml.jackson.core</groupId>
|
||||
<artifactId>jackson-databind</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.fasterxml.jackson.datatype</groupId>
|
||||
<artifactId>jackson-datatype-jsr310</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- SLF4J / Logback — Spring Boot default logging.
|
||||
Logback configured in application.yaml for structured JSON
|
||||
output suitable for ELK ingestion. -->
|
||||
<dependency>
|
||||
<groupId>net.logstash.logback</groupId>
|
||||
<artifactId>logstash-logback-encoder</artifactId>
|
||||
<version>7.4</version>
|
||||
</dependency>
|
||||
|
||||
<!-- =======================================================
|
||||
TEST DEPENDENCIES
|
||||
======================================================= -->
|
||||
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-test</artifactId>
|
||||
<scope>test</scope>
|
||||
<!-- Excludes vintage JUnit 4 engine — JUnit 5 only -->
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>org.junit.vintage</groupId>
|
||||
<artifactId>junit-vintage-engine</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
|
||||
<!-- TestContainers — PostgreSQL container for integration tests.
|
||||
Tests spin up a real PostgreSQL 15 container, run Flyway
|
||||
migrations, and validate the full persistence layer.
|
||||
Never use H2 — not even in tests. -->
|
||||
<dependency>
|
||||
<groupId>org.testcontainers</groupId>
|
||||
<artifactId>postgresql</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.testcontainers</groupId>
|
||||
<artifactId>junit-jupiter</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
|
||||
<!-- HAPI FHIR test utilities — FhirContext in tests -->
|
||||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-test-utilities</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
|
||||
<!-- WireMock — mock OCL and cluster validator in unit tests.
|
||||
Allows testing 422 rejection paths without live OCL. -->
|
||||
<dependency>
|
||||
<groupId>org.wiremock</groupId>
|
||||
<artifactId>wiremock-standalone</artifactId>
|
||||
<version>3.5.4</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
<plugins>
|
||||
|
||||
<!-- Spring Boot Maven plugin — repackages JAR as fat JAR
|
||||
and embeds build-info.properties for /actuator/info.
|
||||
Configured in parent pluginManagement. -->
|
||||
<plugin>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-maven-plugin</artifactId>
|
||||
<!-- Configuration inherited from parent pluginManagement -->
|
||||
</plugin>
|
||||
|
||||
<!-- Compiler plugin — Java 17, inherited from parent -->
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
</plugin>
|
||||
|
||||
<!-- Surefire — JUnit 5, inherited from parent -->
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
</plugin>
|
||||
|
||||
<!-- Resources plugin — ensures packages/ directory with
|
||||
bd.gov.dghs.core-0.2.1.tgz is included in the fat JAR
|
||||
under classpath:packages/ -->
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-resources-plugin</artifactId>
|
||||
<configuration>
|
||||
<resources>
|
||||
<resource>
|
||||
<directory>src/main/resources</directory>
|
||||
<filtering>false</filtering>
|
||||
<!-- filtering=false is critical: the .tgz is binary.
|
||||
Maven resource filtering on binary files corrupts them. -->
|
||||
</resource>
|
||||
</resources>
|
||||
</configuration>
|
||||
</plugin>
|
||||
|
||||
</plugins>
|
||||
|
||||
<!-- Ensure the fat JAR is named predictably for Docker COPY -->
|
||||
<finalName>bd-fhir-hapi</finalName>
|
||||
|
||||
</build>
|
||||
|
||||
</project>
|
||||
@@ -0,0 +1,30 @@
|
||||
package bd.gov.dghs.fhir;
|
||||
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||
import org.springframework.scheduling.annotation.EnableAsync;
|
||||
|
||||
/**
|
||||
* BD FHIR National Repository — Spring Boot application entry point.
|
||||
*
|
||||
* <p>This is the main class that bootstraps the Spring application context.
|
||||
* All configuration is handled by the {@code @Configuration} classes in
|
||||
* {@code bd.gov.dghs.fhir.config}. This class intentionally contains no
|
||||
* configuration logic — it is a pure entry point.
|
||||
*
|
||||
* <p>{@code @EnableAsync} activates Spring's async task executor, which is
|
||||
* required by {@link bd.gov.dghs.fhir.audit.AuditEventEmitter} and
|
||||
* {@link bd.gov.dghs.fhir.audit.RejectedSubmissionSink} for non-blocking
|
||||
* audit writes. The executor pool is configured in {@code application.yaml}
|
||||
* under {@code spring.task.execution}.
|
||||
*/
|
||||
@SpringBootApplication(
|
||||
scanBasePackages = "bd.gov.dghs.fhir"
|
||||
)
|
||||
@EnableAsync
|
||||
public class BdFhirApplication {
|
||||
|
||||
public static void main(String[] args) {
|
||||
SpringApplication.run(BdFhirApplication.class, args);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,203 @@
|
||||
package bd.gov.dghs.fhir.audit;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;

import java.time.Instant;
import java.util.List;
import java.util.UUID;

/**
 * Emits audit event records to {@code audit.audit_events} asynchronously.
 *
 * <h2>Immutability enforcement</h2>
 * <p>This class uses INSERT only — no UPDATE or DELETE methods exist.
 * The {@code audit_writer_login} PostgreSQL user has INSERT-only privileges
 * on the audit schema — any UPDATE or DELETE at the JDBC level would fail
 * with a PostgreSQL permission error regardless of what this class attempts.
 *
 * <h2>Async execution</h2>
 * <p>{@code @Async} causes Spring to execute {@link #emitAsync} on the
 * {@code audit-async-} thread pool (configured in application.yaml
 * {@code spring.task.execution}). The calling FHIR request thread returns
 * immediately after submitting the task — audit writes do not add to
 * vendor-visible response latency.
 *
 * <p>If the async queue is full (500 entries by default), the task executor
 * blocks the submitting thread until space is available. At expected pilot
 * load (&lt;10,000 resources/day = ~0.1/s average), the queue should never fill.
 * The queue depth provides burst absorption for sudden load spikes.
 *
 * <h2>Failure handling</h2>
 * <p>If the INSERT fails (postgres-audit unavailable, constraint violation, etc.),
 * the failure is logged at ERROR level. The FHIR operation has already completed
 * successfully at this point — the vendor has received their 201 or 422.
 * Audit write failure does not affect the FHIR response.
 *
 * <p>Serialization of {@code validationMessages} is deliberately isolated from
 * the INSERT: a Jackson failure on that optional column degrades the record
 * (messages stored as NULL) rather than losing the whole audit event.
 */
@Component
public class AuditEventEmitter {

    private static final Logger log = LoggerFactory.getLogger(AuditEventEmitter.class);

    private final JdbcTemplate auditJdbcTemplate;
    private final ObjectMapper objectMapper;

    public AuditEventEmitter(
            @Qualifier("auditJdbcTemplate") JdbcTemplate auditJdbcTemplate) {
        this.auditJdbcTemplate = auditJdbcTemplate;
        // JavaTimeModule: required so Instant and friends serialise as ISO-8601
        // rather than failing or emitting epoch structs.
        this.objectMapper = new ObjectMapper()
                .registerModule(new JavaTimeModule());
    }

    /**
     * Emits a single audit event record asynchronously.
     *
     * <p>Executes on the {@code audit-async-} thread pool.
     * Returns immediately on the calling thread.
     *
     * @param record immutable audit record built by the interceptor; never null
     */
    @Async("taskExecutor")
    public void emitAsync(AuditRecord record) {
        try {
            // Serialisation is guarded separately: a Jackson failure must not
            // cost us the entire audit row — only the optional JSON column.
            String validationMessagesJson = serializeValidationMessages(record);

            auditJdbcTemplate.update(
                    """
                    INSERT INTO audit.audit_events (
                        event_id, event_time, event_type, operation,
                        resource_type, resource_id, resource_version,
                        outcome, outcome_detail,
                        sending_facility, client_id, subject,
                        request_ip, request_id, validation_messages
                    ) VALUES (
                        ?, ?, ?, ?,
                        ?, ?, ?,
                        ?, ?,
                        ?, ?, ?,
                        ?, ?, ?::jsonb
                    )
                    """,
                    record.eventId(),
                    // event_time must be a java.sql.Timestamp for JDBC → timestamptz mapping
                    java.sql.Timestamp.from(record.eventTime()),
                    record.eventType(),
                    record.operation(),
                    record.resourceType(),
                    record.resourceId(),
                    record.resourceVersion(),
                    record.outcome(),
                    truncate(record.outcomeDetail(), 2000),
                    truncate(record.sendingFacility(), 200),
                    truncate(record.clientId(), 200),
                    truncate(record.subject(), 200),
                    truncate(record.requestIp(), 45),
                    truncate(record.requestId(), 36),
                    validationMessagesJson
            );

            log.debug("Audit event emitted: eventId={} outcome={} clientId={}",
                    record.eventId(), record.outcome(), record.clientId());

        } catch (Exception e) {
            // Log at ERROR — audit gap is a serious operational issue
            log.error("AUDIT WRITE FAILED — event lost: eventId={} outcome={} " +
                            "clientId={} resourceType={} error={}",
                    record.eventId(), record.outcome(),
                    record.clientId(), record.resourceType(), e.getMessage(), e);
        }
    }

    // =========================================================================
    // Helpers
    // =========================================================================

    /**
     * Serialises the record's validation messages to a JSON array string.
     *
     * <p>Returns {@code null} when there are no messages, or when Jackson
     * fails — in the failure case the event is still inserted (with a NULL
     * validation_messages column) and the serialisation problem is logged
     * at WARN, rather than losing the entire audit record.
     */
    private String serializeValidationMessages(AuditRecord record) {
        List<String> messages = record.validationMessages();
        if (messages == null || messages.isEmpty()) {
            return null;
        }
        try {
            return objectMapper.writeValueAsString(messages);
        } catch (Exception e) {
            log.warn("Validation message serialisation failed — storing NULL " +
                            "validation_messages for eventId={}: {}",
                    record.eventId(), e.getMessage());
            return null;
        }
    }

    /** Truncates {@code value} to at most {@code maxLength} chars; null-safe. */
    private String truncate(String value, int maxLength) {
        if (value == null) return null;
        return value.length() <= maxLength ? value : value.substring(0, maxLength);
    }

    // =========================================================================
    // AuditRecord — immutable value object built by AuditEventInterceptor
    // =========================================================================

    /**
     * Immutable audit record. Built by {@link bd.gov.dghs.fhir.interceptor.AuditEventInterceptor}
     * and passed to {@link #emitAsync}.
     *
     * <p>Uses a builder pattern because not all fields are relevant for all
     * event types (e.g., resourceId is null for auth failures).
     */
    public record AuditRecord(
            UUID eventId,
            Instant eventTime,
            String eventType,      // OPERATION | AUTH_FAILURE | VALIDATION_FAILURE | TERMINOLOGY_FAILURE
            String operation,      // CREATE | UPDATE | DELETE | READ | PATCH
            String resourceType,
            String resourceId,
            Long resourceVersion,
            String outcome,        // ACCEPTED | REJECTED
            String outcomeDetail,
            String sendingFacility,
            String clientId,
            String subject,
            String requestIp,
            String requestId,
            List<String> validationMessages
    ) {
        public static Builder builder() { return new Builder(); }

        public static final class Builder {
            private UUID eventId;
            private Instant eventTime;
            private String eventType;
            private String operation;
            private String resourceType;
            private String resourceId;
            private Long resourceVersion;
            private String outcome;
            private String outcomeDetail;
            private String sendingFacility;
            private String clientId;
            private String subject;
            private String requestIp;
            private String requestId;
            private List<String> validationMessages;

            public Builder eventId(UUID v) { eventId = v; return this; }
            public Builder eventTime(Instant v) { eventTime = v; return this; }
            public Builder eventType(String v) { eventType = v; return this; }
            public Builder operation(String v) { operation = v; return this; }
            public Builder resourceType(String v) { resourceType = v; return this; }
            public Builder resourceId(String v) { resourceId = v; return this; }
            public Builder resourceVersion(Long v) { resourceVersion = v; return this; }
            public Builder outcome(String v) { outcome = v; return this; }
            public Builder outcomeDetail(String v) { outcomeDetail = v; return this; }
            public Builder sendingFacility(String v) { sendingFacility = v; return this; }
            public Builder clientId(String v) { clientId = v; return this; }
            public Builder subject(String v) { subject = v; return this; }
            public Builder requestIp(String v) { requestIp = v; return this; }
            public Builder requestId(String v) { requestId = v; return this; }
            public Builder validationMessages(List<String> v) { validationMessages = v; return this; }

            /**
             * Builds the record, defaulting eventId (random UUID), eventTime
             * (now), clientId and subject ("unknown") when unset.
             */
            public AuditRecord build() {
                return new AuditRecord(
                        eventId != null ? eventId : UUID.randomUUID(),
                        eventTime != null ? eventTime : Instant.now(),
                        eventType, operation, resourceType, resourceId,
                        resourceVersion, outcome, outcomeDetail, sendingFacility,
                        clientId != null ? clientId : "unknown",
                        subject != null ? subject : "unknown",
                        requestIp, requestId, validationMessages);
            }
        }
    }
}
|
||||
@@ -0,0 +1,218 @@
|
||||
package bd.gov.dghs.fhir.audit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;

import java.time.Instant;
import java.util.UUID;

/**
 * Stores rejected FHIR resource payloads to {@code audit.fhir_rejected_submissions}.
 *
 * <h2>Purpose</h2>
 * <p>Rejected resources are never stored in the main FHIR repository. Instead,
 * the full submitted payload is written here with:
 * <ul>
 *   <li>The exact JSON as submitted by the vendor (byte-for-byte, in TEXT column)</li>
 *   <li>A machine-readable rejection code (from the {@code rejection_code} CHECK constraint)</li>
 *   <li>A human-readable rejection reason</li>
 *   <li>The FHIRPath expression of the violating element</li>
 *   <li>The violated profile URL (for profile violations)</li>
 *   <li>The invalid code and system (for terminology violations)</li>
 *   <li>The sending facility and client_id</li>
 * </ul>
 *
 * <h2>Forensic use cases</h2>
 * <ul>
 *   <li>Vendor debugging: vendor submits a Condition, gets 422, asks DGHS why.
 *       DGHS queries by client_id + submission_time to retrieve the exact payload
 *       and rejection reason.</li>
 *   <li>Dispute resolution: vendor claims they submitted a valid resource.
 *       DGHS retrieves the original payload to verify.</li>
 *   <li>Quality monitoring: DGHS analyses rejection patterns by facility or
 *       rejection code to identify systemic data quality issues.</li>
 *   <li>IG development: common rejection codes indicate constraints that may
 *       be too strict or IG profiles that need clarification.</li>
 * </ul>
 *
 * <h2>Payload storage format</h2>
 * <p>Payload is stored as TEXT (not JSONB) to preserve the exact bytes as
 * submitted. JSONB parsing at INSERT time would normalise whitespace and key
 * ordering, obscuring potential encoding or formatting issues in vendor submissions.
 * It would also reject malformed JSON payloads — but a malformed JSON payload
 * is itself a valid rejection scenario that needs to be captured.
 *
 * <h2>Retention policy</h2>
 * <p>No automatic deletion. The partitioned table structure allows old partitions
 * to be DROPped when retention policy dictates (e.g., drop partitions older than
 * 7 years per health record retention law). DROP PARTITION is a metadata-only
 * operation — instant and non-blocking unlike DELETE on unpartitioned tables.
 * Retention management is a DBA responsibility, not an application responsibility.
 */
@Component
public class RejectedSubmissionSink {

    private static final Logger log = LoggerFactory.getLogger(RejectedSubmissionSink.class);

    /**
     * Maximum payload size stored in the rejected submissions table.
     * FHIR resources should not exceed 1MB in practice, but malformed or
     * adversarial payloads could be arbitrarily large. Cap at 4MB to prevent
     * the audit table from becoming a vector for storage exhaustion.
     *
     * <p>NOTE: the cap is enforced in UTF-16 code units ({@code String.length()}),
     * which equals the UTF-8 byte count for ASCII payloads (the overwhelmingly
     * common case) and is within 4x of it otherwise — an acceptable bound for
     * a storage-exhaustion guard.
     */
    private static final int MAX_PAYLOAD_BYTES = 4 * 1024 * 1024;

    private final JdbcTemplate auditJdbcTemplate;

    public RejectedSubmissionSink(
            @Qualifier("auditJdbcTemplate") JdbcTemplate auditJdbcTemplate) {
        this.auditJdbcTemplate = auditJdbcTemplate;
    }

    /**
     * Stores a rejected submission asynchronously.
     *
     * <p>Executes on the {@code audit-async-} thread pool.
     * Returns immediately on the calling FHIR request thread.
     * The 422 response is already on its way to the vendor before this runs.
     *
     * @param submission immutable rejected-submission record; never null
     */
    @Async("taskExecutor")
    public void storeAsync(RejectedSubmission submission) {
        try {
            // Enforce payload size cap before writing
            String payload = submission.resourcePayload();
            if (payload != null && payload.length() > MAX_PAYLOAD_BYTES) {
                log.warn("Rejected submission payload truncated: originalSize={} " +
                                "clientId={} submissionId={}",
                        payload.length(), submission.clientId(), submission.submissionId());
                int cut = MAX_PAYLOAD_BYTES;
                // Never cut through a surrogate pair: a lone high surrogate is
                // not encodable as UTF-8 and would make the JDBC driver reject
                // the INSERT — losing the forensic record entirely.
                if (Character.isHighSurrogate(payload.charAt(cut - 1))) {
                    cut--;
                }
                payload = payload.substring(0, cut) +
                        "\n... [TRUNCATED: payload exceeded " + MAX_PAYLOAD_BYTES + " bytes]";
            }

            auditJdbcTemplate.update(
                    """
                    INSERT INTO audit.fhir_rejected_submissions (
                        submission_id, submission_time, event_id,
                        resource_type, resource_payload,
                        rejection_code, rejection_reason,
                        element_path, violated_profile,
                        invalid_code, invalid_system,
                        sending_facility, client_id
                    ) VALUES (
                        ?, ?, ?,
                        ?, ?,
                        ?, ?,
                        ?, ?,
                        ?, ?,
                        ?, ?
                    )
                    """,
                    submission.submissionId(),
                    // java.sql.Timestamp required for JDBC → timestamptz mapping
                    java.sql.Timestamp.from(submission.submissionTime()),
                    submission.eventId(),
                    truncate(submission.resourceType(), 40),
                    payload,
                    submission.rejectionCode(),
                    truncate(submission.rejectionReason(), 5000),
                    truncate(submission.elementPath(), 500),
                    truncate(submission.violatedProfile(), 500),
                    truncate(submission.invalidCode(), 200),
                    truncate(submission.invalidSystem(), 200),
                    truncate(submission.sendingFacility(), 200),
                    truncate(submission.clientId(), 200)
            );

            log.debug("Rejected submission stored: submissionId={} rejectionCode={} " +
                            "clientId={} resourceType={}",
                    submission.submissionId(), submission.rejectionCode(),
                    submission.clientId(), submission.resourceType());

        } catch (Exception e) {
            log.error("REJECTED SUBMISSION STORAGE FAILED — forensic record lost: " +
                            "submissionId={} rejectionCode={} clientId={} error={}",
                    submission.submissionId(), submission.rejectionCode(),
                    submission.clientId(), e.getMessage(), e);
        }
    }

    // =========================================================================
    // Helper
    // =========================================================================

    /** Truncates {@code value} to at most {@code maxLength} chars; null-safe. */
    private String truncate(String value, int maxLength) {
        if (value == null) return null;
        return value.length() <= maxLength ? value : value.substring(0, maxLength);
    }

    // =========================================================================
    // RejectedSubmission — immutable value object
    // =========================================================================

    /**
     * Immutable rejected submission record.
     * Built by {@link bd.gov.dghs.fhir.interceptor.AuditEventInterceptor}
     * and passed to {@link #storeAsync}.
     */
    public record RejectedSubmission(
            UUID submissionId,
            Instant submissionTime,
            UUID eventId,
            String resourceType,
            String resourcePayload,
            String rejectionCode,
            String rejectionReason,
            String elementPath,
            String violatedProfile,
            String invalidCode,
            String invalidSystem,
            String sendingFacility,
            String clientId
    ) {
        public static Builder builder() { return new Builder(); }

        public static final class Builder {
            private UUID submissionId;
            private Instant submissionTime;
            private UUID eventId;
            private String resourceType;
            private String resourcePayload;
            private String rejectionCode;
            private String rejectionReason;
            private String elementPath;
            private String violatedProfile;
            private String invalidCode;
            private String invalidSystem;
            private String sendingFacility;
            private String clientId;

            public Builder submissionId(UUID v) { submissionId = v; return this; }
            public Builder submissionTime(Instant v) { submissionTime = v; return this; }
            public Builder eventId(UUID v) { eventId = v; return this; }
            public Builder resourceType(String v) { resourceType = v; return this; }
            public Builder resourcePayload(String v) { resourcePayload = v; return this; }
            public Builder rejectionCode(String v) { rejectionCode = v; return this; }
            public Builder rejectionReason(String v) { rejectionReason = v; return this; }
            public Builder elementPath(String v) { elementPath = v; return this; }
            public Builder violatedProfile(String v) { violatedProfile = v; return this; }
            public Builder invalidCode(String v) { invalidCode = v; return this; }
            public Builder invalidSystem(String v) { invalidSystem = v; return this; }
            public Builder sendingFacility(String v) { sendingFacility = v; return this; }
            public Builder clientId(String v) { clientId = v; return this; }

            /**
             * Builds the record, defaulting submissionId/eventId (random UUID),
             * submissionTime (now), and clientId ("unknown") when unset.
             */
            public RejectedSubmission build() {
                return new RejectedSubmission(
                        submissionId != null ? submissionId : UUID.randomUUID(),
                        submissionTime != null ? submissionTime : Instant.now(),
                        eventId != null ? eventId : UUID.randomUUID(),
                        resourceType, resourcePayload, rejectionCode,
                        rejectionReason, elementPath, violatedProfile,
                        invalidCode, invalidSystem, sendingFacility,
                        clientId != null ? clientId : "unknown");
            }
        }
    }
}
|
||||
@@ -0,0 +1,485 @@
|
||||
package bd.gov.dghs.fhir.config;
|
||||
|
||||
import com.zaxxer.hikari.HikariConfig;
|
||||
import com.zaxxer.hikari.HikariDataSource;
|
||||
import org.flywaydb.core.Flyway;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.boot.actuate.health.Health;
|
||||
import org.springframework.boot.actuate.health.HealthIndicator;
|
||||
import org.springframework.boot.autoconfigure.flyway.FlywayMigrationInitializer;
|
||||
import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.boot.orm.jpa.EntityManagerFactoryBuilder;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.DependsOn;
|
||||
import org.springframework.context.annotation.Primary;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
import org.springframework.orm.jpa.JpaTransactionManager;
|
||||
import org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean;
|
||||
import org.springframework.transaction.PlatformTransactionManager;
|
||||
import org.springframework.transaction.annotation.EnableTransactionManagement;
|
||||
|
||||
import jakarta.persistence.EntityManagerFactory;
|
||||
import javax.sql.DataSource;
|
||||
import java.sql.Connection;
|
||||
import java.sql.SQLException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Dual datasource configuration for BD FHIR National deployment.
|
||||
*
|
||||
* <p>Two completely independent datasources:
|
||||
* <ul>
|
||||
* <li>{@code fhirDataSource} — read/write, HAPI JPA store (postgres-fhir via pgBouncer).
|
||||
* Primary datasource — Spring Boot JPA auto-configuration binds to this.</li>
|
||||
* <li>{@code auditDataSource} — INSERT-only, audit schema (postgres-audit via pgBouncer).
|
||||
* Secondary datasource — wired manually, excluded from default health checks.</li>
|
||||
* </ul>
|
||||
*
|
||||
* <p>Two independent Flyway instances:
|
||||
* <ul>
|
||||
* <li>FHIR Flyway: runs {@code classpath:db/migration/fhir/V1__hapi_schema.sql}
|
||||
* against postgres-fhir using superuser credentials (direct, bypasses pgBouncer).</li>
|
||||
* <li>Audit Flyway: runs {@code classpath:db/migration/audit/V2__audit_schema.sql}
|
||||
* against postgres-audit using superuser credentials (direct, bypasses pgBouncer).</li>
|
||||
* </ul>
|
||||
*
|
||||
* <p>The FHIR datasource connects via pgBouncer in session mode. Flyway migrations
|
||||
* connect directly to PostgreSQL (bypassing pgBouncer) to avoid DDL transaction
|
||||
* visibility issues with pgBouncer. See application.yaml comments for rationale.
|
||||
*
|
||||
* <p>The audit datasource health indicator uses
|
||||
* {@code INSERT INTO audit.health_check ... ON CONFLICT DO NOTHING}
|
||||
* rather than {@code SELECT 1}, because the audit_writer role has INSERT-only
|
||||
* privileges and cannot execute SELECT statements.
|
||||
*/
|
||||
@Configuration
|
||||
@EnableTransactionManagement
|
||||
public class DataSourceConfig {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(DataSourceConfig.class);
|
||||
|
||||
// =========================================================================
|
||||
// FHIR DATASOURCE — Primary
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* FHIR datasource properties from {@code spring.datasource.*}.
|
||||
* Spring Boot auto-configuration reads these and creates the primary datasource.
|
||||
*/
|
||||
@Bean
|
||||
@Primary
|
||||
@ConfigurationProperties(prefix = "spring.datasource")
|
||||
public DataSourceProperties fhirDataSourceProperties() {
|
||||
return new DataSourceProperties();
|
||||
}
|
||||
|
||||
/**
|
||||
* Primary HikariCP datasource for HAPI JPA.
|
||||
* Connects to postgres-fhir via pgBouncer (session mode).
|
||||
*
|
||||
* <p>Pool sizing: {@code maximumPoolSize=5} per replica.
|
||||
* At 3 replicas: 15 total PostgreSQL connections, well within
|
||||
* pgBouncer {@code pool_size=20}.
|
||||
*
|
||||
* <p>{@code @Primary} makes this the datasource that Spring Boot's
|
||||
* JPA auto-configuration, JdbcTemplate, and Flyway auto-configuration
|
||||
* bind to by default.
|
||||
*/
|
||||
@Bean
|
||||
@Primary
|
||||
@ConfigurationProperties(prefix = "spring.datasource.hikari")
|
||||
public DataSource fhirDataSource() {
|
||||
DataSource ds = fhirDataSourceProperties()
|
||||
.initializeDataSourceBuilder()
|
||||
.type(HikariDataSource.class)
|
||||
.build();
|
||||
log.info("FHIR datasource initialised: url={}",
|
||||
fhirDataSourceProperties().getUrl());
|
||||
return ds;
|
||||
}
|
||||
|
||||
/**
|
||||
* JdbcTemplate bound to the FHIR datasource.
|
||||
* Used by {@link bd.gov.dghs.fhir.init.IgPackageInitializer}
|
||||
* for advisory lock acquisition.
|
||||
*/
|
||||
@Bean
|
||||
@Primary
|
||||
public JdbcTemplate fhirJdbcTemplate(@Qualifier("fhirDataSource") DataSource fhirDataSource) {
|
||||
return new JdbcTemplate(fhirDataSource);
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// AUDIT DATASOURCE — Secondary
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* Audit datasource properties from {@code audit.datasource.*}.
|
||||
* Separate prefix — Spring Boot auto-configuration does NOT touch this.
|
||||
*/
|
||||
@Bean
|
||||
@ConfigurationProperties(prefix = "audit.datasource")
|
||||
public DataSourceProperties auditDataSourceProperties() {
|
||||
return new DataSourceProperties();
|
||||
}
|
||||
|
||||
/**
|
||||
* HikariCP datasource for audit writes.
|
||||
* Connects to postgres-audit via pgBouncer (session mode).
|
||||
*
|
||||
* <p>The audit_writer_login PostgreSQL user has INSERT-only privileges
|
||||
* on the audit schema. Any SELECT, UPDATE, DELETE, or TRUNCATE issued
|
||||
* against this datasource will fail with a PostgreSQL permission error.
|
||||
*
|
||||
* <p>Pool sizing: {@code maximumPoolSize=2} — audit writes are async
|
||||
* and low-volume. Audit failures do not block FHIR request processing.
|
||||
*
|
||||
* <p>NOT annotated {@code @Primary} — must be injected by qualifier.
|
||||
*/
|
||||
@Bean("auditDataSource")
|
||||
public DataSource auditDataSource() {
|
||||
HikariConfig config = new HikariConfig();
|
||||
config.setJdbcUrl(auditDataSourceProperties().getUrl());
|
||||
config.setUsername(auditDataSourceProperties().getUsername());
|
||||
config.setPassword(auditDataSourceProperties().getPassword());
|
||||
config.setDriverClassName("org.postgresql.Driver");
|
||||
|
||||
// Pool sizing from environment — see application.yaml
|
||||
config.setMaximumPoolSize(
|
||||
Integer.parseInt(System.getenv().getOrDefault(
|
||||
"AUDIT_DATASOURCE_HIKARI_MAXIMUM_POOL_SIZE", "2")));
|
||||
config.setMinimumIdle(
|
||||
Integer.parseInt(System.getenv().getOrDefault(
|
||||
"AUDIT_DATASOURCE_HIKARI_MINIMUM_IDLE", "1")));
|
||||
config.setPoolName(System.getenv().getOrDefault(
|
||||
"AUDIT_DATASOURCE_HIKARI_POOL_NAME", "audit-pool"));
|
||||
|
||||
config.setConnectionTimeout(5_000L);
|
||||
config.setIdleTimeout(300_000L);
|
||||
config.setMaxLifetime(900_000L);
|
||||
config.setAutoCommit(true);
|
||||
|
||||
// PostgreSQL-specific
|
||||
config.addDataSourceProperty("ApplicationName", "bd-fhir-hapi-audit");
|
||||
config.addDataSourceProperty("socketTimeout", "10");
|
||||
|
||||
// Leak detection: 30s — audit connections should never be held long
|
||||
config.setLeakDetectionThreshold(30_000L);
|
||||
|
||||
// Connection test: audit_writer cannot SELECT 1.
|
||||
// Use INSERT ... ON CONFLICT DO NOTHING against the health_check table.
|
||||
// HikariCP calls this to verify a connection is alive when returning
|
||||
// it from the pool after idle time.
|
||||
config.setConnectionTestQuery(
|
||||
"INSERT INTO audit.health_check (check_id) " +
|
||||
"VALUES ('00000000-0000-0000-0000-000000000000') " +
|
||||
"ON CONFLICT DO NOTHING");
|
||||
|
||||
log.info("Audit datasource initialised: url={}",
|
||||
auditDataSourceProperties().getUrl());
|
||||
|
||||
return new HikariDataSource(config);
|
||||
}
|
||||
|
||||
/**
|
||||
* JdbcTemplate bound to the audit datasource.
|
||||
* Used by {@link bd.gov.dghs.fhir.audit.AuditEventEmitter}
|
||||
* and {@link bd.gov.dghs.fhir.audit.RejectedSubmissionSink}.
|
||||
*/
|
||||
@Bean("auditJdbcTemplate")
|
||||
public JdbcTemplate auditJdbcTemplate(
|
||||
@Qualifier("auditDataSource") DataSource auditDataSource) {
|
||||
return new JdbcTemplate(auditDataSource);
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// FLYWAY — FHIR schema
|
||||
// Spring Boot auto-configuration runs the primary Flyway instance.
|
||||
// We override it here to point at the fhir/ migration subdirectory
|
||||
// and to connect directly to PostgreSQL (bypassing pgBouncer).
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* FHIR Flyway instance.
|
||||
*
|
||||
* <p>Connects directly to postgres-fhir using superuser credentials
|
||||
* ({@code SPRING_FLYWAY_USER} / {@code SPRING_FLYWAY_PASSWORD}) rather
|
||||
* than the application user, so that it can CREATE TABLE, CREATE INDEX,
|
||||
* and CREATE SEQUENCE.
|
||||
*
|
||||
* <p>The connection URL ({@code SPRING_FLYWAY_URL}) points directly to
|
||||
* postgres-fhir, not pgBouncer, to avoid DDL transaction visibility issues.
|
||||
*
|
||||
* <p>Spring Boot's Flyway auto-configuration is disabled in favour of
|
||||
* this explicit bean. See {@code spring.flyway.enabled=true} in
|
||||
* application.yaml — Spring Boot reads the properties but we override
|
||||
* the bean with our own configuration here for full control.
|
||||
*/
|
||||
@Bean("fhirFlyway")
|
||||
public Flyway fhirFlyway() {
|
||||
String url = System.getenv("SPRING_FLYWAY_URL");
|
||||
String user = System.getenv("SPRING_FLYWAY_USER");
|
||||
String password = System.getenv("SPRING_FLYWAY_PASSWORD");
|
||||
|
||||
if (url == null || user == null || password == null) {
|
||||
throw new IllegalStateException(
|
||||
"FHIR Flyway configuration missing. " +
|
||||
"Required: SPRING_FLYWAY_URL, SPRING_FLYWAY_USER, SPRING_FLYWAY_PASSWORD");
|
||||
}
|
||||
|
||||
Flyway flyway = Flyway.configure()
|
||||
.dataSource(url, user, password)
|
||||
.locations("classpath:db/migration/fhir")
|
||||
.table("flyway_schema_history")
|
||||
.validateOnMigrate(true)
|
||||
.outOfOrder(false)
|
||||
.baselineOnMigrate(false)
|
||||
.mixed(false)
|
||||
.connectRetries(10) // retry DB connection up to 10 times on startup
|
||||
.connectRetriesInterval(5) // 5 seconds between retries
|
||||
.load();
|
||||
|
||||
log.info("Running FHIR Flyway migrations from classpath:db/migration/fhir");
|
||||
var result = flyway.migrate();
|
||||
log.info("FHIR Flyway: {} migration(s) applied, current version: {}",
|
||||
result.migrationsExecuted, result.targetSchemaVersion);
|
||||
|
||||
return flyway;
|
||||
}
|
||||
|
||||
/**
|
||||
* FlywayMigrationInitializer ensures Flyway runs before JPA EntityManagerFactory
|
||||
* attempts to validate the schema. Without this ordering, Hibernate's
|
||||
* {@code ddl-auto: validate} runs against an empty database and fails.
|
||||
*/
|
||||
@Bean
|
||||
@DependsOn("fhirFlyway")
|
||||
public FlywayMigrationInitializer fhirFlywayInitializer(
|
||||
@Qualifier("fhirFlyway") Flyway fhirFlyway) {
|
||||
return new FlywayMigrationInitializer(fhirFlyway, null);
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// FLYWAY — Audit schema
|
||||
// Completely independent instance — separate database, separate history table.
|
||||
// =========================================================================
|
||||
|
||||
/**
 * Audit Flyway instance.
 *
 * <p>Runs {@code V2__audit_schema.sql} against postgres-audit using
 * superuser credentials. Creates the audit schema, partitioned tables,
 * roles, and grants.
 *
 * <p>Uses {@code flyway_audit_schema_history} as its metadata table name
 * to avoid collision with the FHIR Flyway history table.
 *
 * @return the configured (and already-migrated) audit Flyway instance
 * @throws IllegalStateException if any of the three required environment
 *         variables is missing
 */
@Bean("auditFlyway")
public Flyway auditFlyway() {
    // Credentials come from the container environment, never from
    // application.yaml — superuser secrets stay out of config files.
    String url = System.getenv("AUDIT_FLYWAY_URL");
    String user = System.getenv("AUDIT_FLYWAY_USER");
    String password = System.getenv("AUDIT_FLYWAY_PASSWORD");

    // Fail fast with an explicit message rather than letting Flyway fail
    // later with an opaque null-datasource error.
    if (url == null || user == null || password == null) {
        throw new IllegalStateException(
                "Audit Flyway configuration missing. " +
                "Required: AUDIT_FLYWAY_URL, AUDIT_FLYWAY_USER, AUDIT_FLYWAY_PASSWORD");
    }

    Flyway flyway = Flyway.configure()
            .dataSource(url, user, password)
            // Audit migrations live in their own classpath folder, separate
            // from the FHIR migrations.
            .locations("classpath:db/migration/audit")
            // Dedicated history table — avoids collision with the FHIR
            // Flyway instance if both ever point at the same database.
            .table("flyway_audit_schema_history")
            .validateOnMigrate(true)
            .outOfOrder(false)
            .baselineOnMigrate(false)
            .mixed(false)
            // Tolerate slow database startup (e.g. container orchestration):
            // up to 10 retries, 5 seconds apart.
            .connectRetries(10)
            .connectRetriesInterval(5)
            .load();

    log.info("Running Audit Flyway migrations from classpath:db/migration/audit");
    // NOTE(review): migrate() is invoked here AND auditFlywayInitializer
    // below triggers migrate() again via FlywayMigrationInitializer. The
    // second run should be a no-op, but confirm this duplication is intended.
    var result = flyway.migrate();
    log.info("Audit Flyway: {} migration(s) applied, current version: {}",
            result.migrationsExecuted, result.targetSchemaVersion);

    return flyway;
}
|
||||
|
||||
/**
 * FlywayMigrationInitializer for the audit Flyway instance.
 *
 * <p>Mirrors {@code fhirFlywayInitializer}: provides an explicit lifecycle
 * bean that other beans can depend on to guarantee audit-schema migrations
 * have run before they start.
 *
 * @param auditFlyway the audit-database Flyway instance (qualified bean)
 * @return initializer that triggers the standard migrate() during startup
 */
@Bean
@DependsOn("auditFlyway")
public FlywayMigrationInitializer auditFlywayInitializer(
        @Qualifier("auditFlyway") Flyway auditFlyway) {
    // null migration strategy -> default behaviour: run Flyway.migrate().
    return new FlywayMigrationInitializer(auditFlyway, null);
}
|
||||
|
||||
// =========================================================================
|
||||
// HEALTH INDICATORS
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* Custom health indicator for the audit datasource.
|
||||
*
|
||||
* <p>Spring Boot's default {@code DataSourceHealthIndicator} executes
|
||||
* {@code SELECT 1} against all registered datasources. The audit_writer
|
||||
* role cannot execute SELECT — this would cause a spurious DOWN status,
|
||||
* triggering load balancer container cycling.
|
||||
*
|
||||
* <p>This indicator uses {@code INSERT INTO audit.health_check ... ON CONFLICT DO NOTHING}
|
||||
* which the audit_writer role is permitted to execute. The INSERT is idempotent
|
||||
* and does not grow the health_check table (the single seeded row is reused
|
||||
* on every conflict).
|
||||
*
|
||||
* <p>Registered as {@code auditDb} — matches the readiness probe group
|
||||
* in application.yaml ({@code management.endpoint.health.group.readiness.include}).
|
||||
*/
|
||||
@Bean("auditDbHealthIndicator")
|
||||
public HealthIndicator auditDbHealthIndicator(
|
||||
@Qualifier("auditDataSource") DataSource auditDataSource) {
|
||||
return () -> {
|
||||
try (Connection conn = auditDataSource.getConnection()) {
|
||||
// Test the connection is alive with an INSERT the audit_writer
|
||||
// role is permitted to execute.
|
||||
conn.createStatement().execute(
|
||||
"INSERT INTO audit.health_check (check_id) " +
|
||||
"VALUES ('00000000-0000-0000-0000-000000000000') " +
|
||||
"ON CONFLICT DO NOTHING");
|
||||
return Health.up()
|
||||
.withDetail("database", "postgres-audit")
|
||||
.withDetail("pool", "audit-pool")
|
||||
.build();
|
||||
} catch (SQLException e) {
|
||||
log.error("Audit datasource health check failed: {}", e.getMessage());
|
||||
return Health.down()
|
||||
.withDetail("database", "postgres-audit")
|
||||
.withDetail("error", e.getMessage())
|
||||
.build();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Health indicator for OCL reachability.
|
||||
*
|
||||
* <p>Registered as {@code ocl} — included in the readiness probe group.
|
||||
* OCL unreachability makes the replica not-ready (no traffic) but does
|
||||
* not kill the container (liveness remains passing).
|
||||
*
|
||||
* <p>Uses a lightweight HEAD request to the OCL FHIR metadata endpoint
|
||||
* rather than a full $validate-code call.
|
||||
*/
|
||||
@Bean("oclHealthIndicator")
|
||||
public HealthIndicator oclHealthIndicator() {
|
||||
String oclBaseUrl = System.getenv().getOrDefault(
|
||||
"HAPI_OCL_BASE_URL", "https://tr.ocl.dghs.gov.bd/api/fhir");
|
||||
String metadataUrl = oclBaseUrl + "/metadata";
|
||||
|
||||
return () -> {
|
||||
try {
|
||||
java.net.URI uri = java.net.URI.create(metadataUrl);
|
||||
java.net.http.HttpClient client = java.net.http.HttpClient.newBuilder()
|
||||
.connectTimeout(java.time.Duration.ofSeconds(5))
|
||||
.build();
|
||||
java.net.http.HttpRequest request = java.net.http.HttpRequest.newBuilder()
|
||||
.uri(uri)
|
||||
.method("HEAD", java.net.http.HttpRequest.BodyPublishers.noBody())
|
||||
.timeout(java.time.Duration.ofSeconds(5))
|
||||
.build();
|
||||
java.net.http.HttpResponse<Void> response = client.send(
|
||||
request, java.net.http.HttpResponse.BodyHandlers.discarding());
|
||||
|
||||
if (response.statusCode() < 500) {
|
||||
return Health.up()
|
||||
.withDetail("url", metadataUrl)
|
||||
.withDetail("status", response.statusCode())
|
||||
.build();
|
||||
} else {
|
||||
return Health.down()
|
||||
.withDetail("url", metadataUrl)
|
||||
.withDetail("status", response.statusCode())
|
||||
.build();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.warn("OCL health check failed: {}", e.getMessage());
|
||||
return Health.down()
|
||||
.withDetail("url", metadataUrl)
|
||||
.withDetail("error", e.getMessage())
|
||||
.build();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// JPA — EntityManagerFactory and TransactionManager
|
||||
// Explicitly declared to ensure Spring Boot binds them to fhirDataSource.
|
||||
// Without explicit declaration, Spring Boot auto-configuration may attempt
|
||||
// to bind to both datasources (because two DataSource beans exist)
|
||||
// and fail with "No qualifying bean of type DataSource".
|
||||
// =========================================================================
|
||||
|
||||
/**
 * EntityManagerFactory for HAPI JPA, bound explicitly to fhirDataSource.
 *
 * <p>Package scan covers HAPI's internal entity classes. HAPI registers
 * its entities via its own JPA configuration — this factory is the container
 * that hosts them. The {@code persistenceUnit} name "default" is what HAPI
 * expects when it resolves the EntityManagerFactory from the Spring context.
 *
 * <p>{@code @DependsOn("fhirFlywayInitializer")} guarantees Flyway has
 * created/updated the schema before Hibernate's {@code validate} runs.
 *
 * @param builder        Spring Boot's EntityManagerFactory builder
 * @param fhirDataSource the FHIR-database datasource (qualified bean)
 * @return the configured factory bean for the "default" persistence unit
 */
@Bean
@Primary
@DependsOn({"fhirFlywayInitializer"})
public LocalContainerEntityManagerFactoryBean entityManagerFactory(
        EntityManagerFactoryBuilder builder,
        @Qualifier("fhirDataSource") DataSource fhirDataSource) {

    Map<String, Object> properties = new HashMap<>();
    properties.put("hibernate.dialect",
            "org.hibernate.dialect.PostgreSQLDialect");
    // "validate" — Hibernate never issues DDL; schema changes are owned
    // exclusively by Flyway migrations.
    properties.put("hibernate.hbm2ddl.auto", "validate");
    // Batch writes for throughput; ordering inserts/updates lets Hibernate
    // actually group statements into those batches.
    properties.put("hibernate.jdbc.batch_size", "50");
    properties.put("hibernate.order_inserts", "true");
    properties.put("hibernate.order_updates", "true");
    properties.put("hibernate.jdbc.fetch_size", "100");
    // Caching, statistics, and SQL logging disabled for production.
    properties.put("hibernate.cache.use_second_level_cache", "false");
    properties.put("hibernate.cache.use_query_cache", "false");
    properties.put("hibernate.generate_statistics", "false");
    properties.put("hibernate.format_sql", "false");
    properties.put("hibernate.show_sql", "false");
    // Hold the JDBC connection only for the duration of a transaction.
    properties.put("hibernate.connection.handling_mode",
            "DELAYED_ACQUISITION_AND_RELEASE_AFTER_TRANSACTION");

    return builder
            .dataSource(fhirDataSource)
            .packages(
                    // HAPI entity packages
                    // NOTE(review): the "ca.uhn.hapi.fhir.jpa.*" entries look
                    // suspicious — HAPI JPA entities normally live under
                    // "ca.uhn.fhir.jpa.*" (as the third entry does). Verify
                    // the first two package names against the HAPI version
                    // in use; scanning a nonexistent package is silently a
                    // no-op, so a typo here would go unnoticed.
                    "ca.uhn.hapi.fhir.jpa.model.entity",
                    "ca.uhn.hapi.fhir.jpa.entity",
                    // NPM package entities
                    "ca.uhn.fhir.jpa.model.entity"
            )
            .persistenceUnit("default")
            .properties(properties)
            .build();
}
|
||||
|
||||
/**
|
||||
* Transaction manager for HAPI JPA, bound to the FHIR EntityManagerFactory.
|
||||
*
|
||||
* <p>Audit writes use direct JDBC via {@code auditJdbcTemplate} —
|
||||
* they are not transactional in the JPA sense and do not participate
|
||||
* in HAPI's JPA transactions.
|
||||
*/
|
||||
@Bean
|
||||
@Primary
|
||||
public PlatformTransactionManager transactionManager(
|
||||
@Qualifier("entityManagerFactory") EntityManagerFactory entityManagerFactory) {
|
||||
JpaTransactionManager txManager = new JpaTransactionManager();
|
||||
txManager.setEntityManagerFactory(entityManagerFactory);
|
||||
return txManager;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,542 @@
|
||||
package bd.gov.dghs.fhir.config;
|
||||
|
||||
import bd.gov.dghs.fhir.init.IgPackageInitializer;
|
||||
import bd.gov.dghs.fhir.terminology.BdTerminologyValidationSupport;
|
||||
import ca.uhn.fhir.context.FhirContext;
|
||||
import ca.uhn.fhir.context.support.DefaultProfileValidationSupport;
|
||||
import ca.uhn.fhir.context.support.IValidationSupport;
|
||||
import ca.uhn.fhir.jpa.api.config.JpaStorageSettings;
|
||||
import ca.uhn.fhir.jpa.api.dao.DaoRegistry;
|
||||
import ca.uhn.fhir.jpa.packages.NpmPackageValidationSupport;
|
||||
import ca.uhn.fhir.jpa.starter.AppProperties;
|
||||
import ca.uhn.fhir.rest.server.RestfulServer;
|
||||
import ca.uhn.fhir.rest.server.interceptor.RequestValidatingInterceptor;
|
||||
import ca.uhn.fhir.rest.server.interceptor.ResponseHighlighterInterceptor;
|
||||
import ca.uhn.fhir.validation.FhirValidator;
|
||||
import ca.uhn.fhir.validation.IValidatorModule;
|
||||
import ca.uhn.fhir.validation.ResultSeverityEnum;
|
||||
import org.hl7.fhir.common.hapi.validation.support.CommonCodeSystemsTerminologyService;
|
||||
import org.hl7.fhir.common.hapi.validation.support.InMemoryTerminologyServerValidationSupport;
|
||||
import org.hl7.fhir.common.hapi.validation.support.SnapshotGeneratingValidationSupport;
|
||||
import org.hl7.fhir.common.hapi.validation.support.ValidationSupportChain;
|
||||
import org.hl7.fhir.common.hapi.validation.validator.FhirInstanceValidator;
|
||||
import org.hl7.fhir.r4.model.Meta;
|
||||
import org.hl7.fhir.r4.model.Resource;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.DependsOn;
|
||||
|
||||
import jakarta.annotation.PostConstruct;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.Set;
|
||||
|
||||
/**
 * HAPI FHIR server configuration for BD National deployment.
 *
 * <p>Responsibilities:
 * <ol>
 *   <li>Build the validation support chain in correct dependency order</li>
 *   <li>Load BD Core IG package.tgz from classpath into NpmPackageValidationSupport</li>
 *   <li>Configure JPA storage settings (no H2, correct dialect)</li>
 *   <li>Register the RequestValidatingInterceptor that enforces validation on all writes</li>
 *   <li>Register the unvalidated-profile meta tag interceptor for unknown resource types</li>
 * </ol>
 *
 * <h2>Validation Support Chain (order is critical)</h2>
 * <pre>
 * 1. DefaultProfileValidationSupport
 *    — Built-in FHIR R4 base profiles, StructureDefinitions, ValueSets.
 *    — Must be first: provides the base against which all profiles are validated.
 *
 * 2. CommonCodeSystemsTerminologyService
 *    — Validates codes in common code systems (UCUM, MimeType, Language, etc.)
 *      without calling an external terminology server.
 *    — Must precede remote support: prevents unnecessary OCL calls for
 *      non-ICD-11 coded elements.
 *
 * 3. SnapshotGeneratingValidationSupport
 *    — Generates snapshot views of differential StructureDefinitions.
 *    — BD Core IG profiles may be published as differential-only.
 *      Validation requires the snapshot. This support generates it on demand.
 *    — Must come after DefaultProfileValidationSupport (needs base profiles
 *      to generate snapshots from differentials).
 *
 * 4. InMemoryTerminologyServerValidationSupport
 *    — In-memory cache for terminology lookups.
 *    — Serves as the 24-hour cache layer in front of OCL.
 *    — Must come before BdTerminologyValidationSupport (cache-before-remote).
 *
 * 5. NpmPackageValidationSupport (BD Core IG)
 *    — Loads bd.gov.dghs.core-0.2.1.tgz profiles into the validation chain.
 *    — bd-patient, bd-condition, bd-encounter, bd-observation, bd-practitioner,
 *      bd-organization, bd-location, bd-medication, bd-medicationrequest,
 *      bd-immunization.
 *    — Must come after snapshot support (profiles may need snapshot generation).
 *
 * 6. BdTerminologyValidationSupport (custom — see terminology/ package)
 *    — Forces $validate-code against OCL for ICD-11 codes.
 *    — Suppresses $expand (OCL limitation).
 *    — Must be last: OCL is the final authority for terminology.
 *      InMemoryTerminologyServerValidationSupport serves the cache hit path.
 * </pre>
 */
@Configuration
@DependsOn({"fhirFlywayInitializer", "auditFlywayInitializer"})
public class FhirServerConfig {

    private static final Logger log = LoggerFactory.getLogger(FhirServerConfig.class);

    // BD Core IG profile URLs — resources conforming to these get full validation.
    // Resources NOT in this set get stored with unvalidated-profile meta tag.
    private static final Set<String> BD_CORE_PROFILE_RESOURCE_TYPES = Set.of(
            "Patient", "Condition", "Encounter", "Observation",
            "Practitioner", "Organization", "Location",
            "Medication", "MedicationRequest", "Immunization"
    );

    // Public base URL of this server (advertised in server metadata).
    @Value("${hapi.fhir.server-address}")
    private String serverAddress;

    // classpath: location of the BD Core IG .tgz bundled into the image.
    @Value("${bd.fhir.ig.package-classpath}")
    private String igPackageClasspath;

    // IG version string (e.g. "0.2.1") — used for logging and server metadata.
    @Value("${bd.fhir.ig.version}")
    private String igVersion;

    // Coding system for the meta tag added to un-profiled resource types.
    @Value("${bd.fhir.unvalidated-profile-tag-system}")
    private String unvalidatedTagSystem;

    // Coding code for the meta tag added to un-profiled resource types.
    @Value("${bd.fhir.unvalidated-profile-tag-code}")
    private String unvalidatedTagCode;

    // =========================================================================
    // FHIR CONTEXT
    // =========================================================================

    /**
     * Singleton FhirContext for FHIR R4.
     *
     * <p>FhirContext is expensive to create (parses all FHIR R4 model classes).
     * It must be a singleton — never create per-request or per-validator instances.
     * HAPI's Spring Boot starter provides this bean automatically, but we declare
     * it explicitly to ensure it is available for injection by our custom classes
     * before HAPI's auto-configuration runs.
     *
     * @return the shared R4 context
     */
    @Bean
    public FhirContext fhirContext() {
        FhirContext ctx = FhirContext.forR4();
        // Performance: disable narrative generation server-side
        ctx.setNarrativeGenerator(null);
        log.info("FhirContext R4 initialised");
        return ctx;
    }

    // =========================================================================
    // JPA STORAGE SETTINGS
    // =========================================================================

    /**
     * HAPI JPA storage settings.
     *
     * <p>Key decisions:
     * <ul>
     *   <li>No H2: enforced by PostgreSQL datasource — H2 is never on classpath.</li>
     *   <li>Allow external references: true — BD Core IG uses canonical URLs.</li>
     *   <li>Reindex on parameter change: false — managed via HAPI $reindex operation.</li>
     *   <li>Auto-create placeholder references: false — all references must resolve.</li>
     * </ul>
     *
     * @return the configured storage settings bean
     */
    @Bean
    public JpaStorageSettings jpaStorageSettings() {
        JpaStorageSettings settings = new JpaStorageSettings();

        // Allow resources to reference external canonical URLs
        // (BD Core IG profiles reference https://fhir.dghs.gov.bd/core/*)
        settings.setAllowExternalReferences(true);

        // Enforce referential integrity — referenced resources must exist
        // in the repository before the referencing resource is stored.
        // Set to false for pilot phase to allow incremental vendor onboarding.
        // Set to true for national rollout when all resource types are live.
        settings.setEnforceReferentialIntegrityOnWrite(false);
        settings.setEnforceReferentialIntegrityOnDelete(false);

        // Reuse cached search results for 60 seconds
        settings.setReuseCachedSearchResultsForMillis(60_000L);

        // Maximum number of search results to load into memory
        // before streaming. 1000 is safe for pilot scale.
        settings.setFetchSizeDefaultMaximum(1000);

        // Default and max page sizes — match application.yaml
        settings.setDefaultPageSize(20);
        settings.setMaximumPageSize(200);

        // Auto-version references: off — vendors must submit correct versions
        // (calling with no arguments clears the path list).
        settings.setAutoVersionReferenceAtPaths();

        // Tag system for unvalidated profiles
        // This is the tag we add to resources of unknown types
        // NOTE(review): the tag values are only LOGGED here; nothing on
        // `settings` stores them. The actual tagging happens in
        // unvalidatedProfileTagInterceptor() below — confirm this log line
        // is intentional and not a leftover of an abandoned setting.
        log.info("JPA storage settings configured. Unvalidated profile tag: {}|{}",
                unvalidatedTagSystem, unvalidatedTagCode);

        return settings;
    }

    // =========================================================================
    // VALIDATION SUPPORT CHAIN
    // =========================================================================

    /**
     * BD Core IG NpmPackageValidationSupport.
     *
     * <p>Loads the BD Core IG package.tgz from the classpath into memory.
     * The package.tgz is bundled into the Docker image at build time
     * ({@code src/main/resources/packages/}).
     *
     * <p>Package loading happens during Spring context initialisation via
     * {@link IgPackageInitializer}, which acquires a PostgreSQL advisory lock
     * to prevent race conditions on multi-replica startup. This bean is the
     * validation support wrapper — actual loading is triggered by
     * {@link IgPackageInitializer}.
     *
     * @param fhirContext the shared R4 context
     * @return support pre-loaded with the BD Core IG package
     * @throws IllegalStateException if the .tgz is absent or unreadable
     */
    @Bean
    public NpmPackageValidationSupport npmPackageValidationSupport(FhirContext fhirContext) {
        NpmPackageValidationSupport support = new NpmPackageValidationSupport(fhirContext);

        // Load the IG package from classpath.
        // The classpath path resolves to the .tgz bundled in the fat JAR.
        String classpathPath = igPackageClasspath.replace("classpath:", "");
        // The stream is opened only as an existence probe; the actual load
        // below re-resolves the resource by its classpath: URL.
        try (InputStream is = getClass().getClassLoader().getResourceAsStream(classpathPath)) {
            if (is == null) {
                throw new IllegalStateException(
                        "BD Core IG package not found at classpath: " + classpathPath +
                        ". Ensure the .tgz file is present in src/main/resources/packages/ " +
                        "before building the Docker image.");
            }
            support.loadPackageFromClasspath("classpath:" + classpathPath);
            log.info("BD Core IG package loaded: classpath:{} (version {})",
                    classpathPath, igVersion);
        } catch (IOException e) {
            throw new IllegalStateException(
                    "Failed to load BD Core IG package from classpath: " + classpathPath, e);
        }

        return support;
    }

    /**
     * The complete validation support chain.
     *
     * <p>Chain order is documented in the class Javadoc above.
     * Do not reorder without understanding the dependency graph.
     *
     * <p>The {@link BdTerminologyValidationSupport} is last because:
     * <ol>
     *   <li>HAPI calls each support in order, stopping at the first
     *       that returns a non-null result.</li>
     *   <li>{@code InMemoryTerminologyServerValidationSupport} serves
     *       cache hits — if the code was validated in the last 24 hours,
     *       OCL is never called.</li>
     *   <li>{@code BdTerminologyValidationSupport} handles cache misses
     *       by calling OCL {@code $validate-code}.</li>
     * </ol>
     *
     * @param fhirContext                    the shared R4 context
     * @param npmPackageValidationSupport    BD Core IG profile support
     * @param bdTerminologyValidationSupport OCL remote terminology support
     * @return the ordered chain used by the instance validator
     */
    @Bean
    public ValidationSupportChain validationSupportChain(
            FhirContext fhirContext,
            NpmPackageValidationSupport npmPackageValidationSupport,
            BdTerminologyValidationSupport bdTerminologyValidationSupport) {

        ValidationSupportChain chain = new ValidationSupportChain(
                // 1. Base FHIR R4 profiles — must be first
                new DefaultProfileValidationSupport(fhirContext),

                // 2. Common code systems (UCUM, MimeType, etc.) — before remote
                new CommonCodeSystemsTerminologyService(fhirContext),

                // 3. Snapshot generation from differentials
                new SnapshotGeneratingValidationSupport(fhirContext),

                // 4. In-memory cache — serves validated codes without OCL call
                // This is HAPI's InMemoryTerminologyServerValidationSupport,
                // not our custom cache. HAPI's built-in cache handles
                // ValueSet expansion results for non-ICD-11 systems.
                new InMemoryTerminologyServerValidationSupport(fhirContext),

                // 5. BD Core IG profiles (bd-patient, bd-condition, etc.)
                npmPackageValidationSupport,

                // 6. OCL remote terminology — ICD-11 $validate-code, $expand suppressed
                bdTerminologyValidationSupport
        );

        log.info("Validation support chain configured with {} supports",
                chain.getValidationSupports().size());

        return chain;
    }

    /**
     * FhirInstanceValidator — the HAPI validator module that runs profile
     * validation against a resource using the support chain.
     *
     * <p>This module is registered with the {@link FhirValidator} and invoked
     * by the {@link RequestValidatingInterceptor} on every incoming request.
     *
     * @param validationSupportChain the ordered support chain
     * @return the validator module used by the request interceptor
     */
    @Bean
    public IValidatorModule fhirInstanceValidator(
            ValidationSupportChain validationSupportChain) {

        FhirInstanceValidator validator = new FhirInstanceValidator(validationSupportChain);

        // Error on unknown profile: true — resources claiming conformance to
        // a profile that is not loaded in the chain cause a validation error.
        // This prevents vendors from submitting resources with invented profile URLs.
        validator.setErrorForUnknownProfiles(true);

        // No extensions allowed that are not declared in the IG.
        // Vendors must use only extensions defined in BD Core IG.
        // Set to false during initial onboarding if vendors have custom extensions
        // not yet added to the IG.
        validator.setNoExtensibleWarnings(false);

        // Assume valid rest references: false — validate that all relative
        // references point to resource types that exist in the server.
        validator.setAssumeValidRestReferences(false);

        return validator;
    }

    /**
     * RequestValidatingInterceptor — enforces validation on ALL incoming writes.
     *
     * <p>Registered with the HAPI RestfulServer. Intercepts every CREATE,
     * UPDATE, and PATCH request. Validates the resource against the support chain
     * before the JPA persistence layer is invoked.
     *
     * <p>On validation failure: returns HTTP 422 Unprocessable Entity with a
     * FHIR OperationOutcome containing:
     * <ul>
     *   <li>Issue severity: error</li>
     *   <li>Issue code: processing or business-rule</li>
     *   <li>Diagnostics: human-readable description of the violation</li>
     *   <li>Expression: FHIRPath of the offending element</li>
     * </ul>
     *
     * <p>IMPORTANT: {@code setFailOnSeverity(ResultSeverityEnum.ERROR)} is the
     * correct setting. {@code WARNING} severity issues do not cause rejection.
     * Only {@code ERROR} and {@code FATAL} cause 422 rejection.
     *
     * @param fhirInstanceValidator the validator module built above
     * @return interceptor enforcing validation on incoming requests
     */
    @Bean
    public RequestValidatingInterceptor requestValidatingInterceptor(
            IValidatorModule fhirInstanceValidator) {

        RequestValidatingInterceptor interceptor = new RequestValidatingInterceptor();
        interceptor.addValidatorModule(fhirInstanceValidator);

        // Reject on ERROR severity — WARNING is logged but not rejected.
        // This is the correct national HIE setting: reject anything that
        // violates a SHALL or MUST constraint; warn on SHOULD constraints.
        interceptor.setFailOnSeverity(ResultSeverityEnum.ERROR);

        // Add validation results to the response OperationOutcome so vendors
        // can see exactly which elements failed and why.
        interceptor.setAddResponseHeadersOnSeverity(ResultSeverityEnum.INFORMATION);

        // Validate requests only — not responses (performance: see application.yaml)
        interceptor.setValidateResponses(false);

        log.info("RequestValidatingInterceptor configured: failOnSeverity=ERROR, " +
                "validateRequests=true, validateResponses=false");

        return interceptor;
    }

    // =========================================================================
    // RESTFUL SERVER CONFIGURATION
    // =========================================================================

    /**
     * RestfulServer post-configuration.
     *
     * <p>Called after HAPI's Spring Boot auto-configuration has created the
     * RestfulServer. Registers our interceptors and configures server metadata.
     *
     * <p>Interceptor registration order matters:
     * <ol>
     *   <li>{@link bd.gov.dghs.fhir.interceptor.KeycloakJwtInterceptor} — runs first,
     *       rejects unauthenticated/unauthorised requests before any FHIR processing.</li>
     *   <li>{@link RequestValidatingInterceptor} — runs after auth, validates resource
     *       content before persistence.</li>
     *   <li>{@link bd.gov.dghs.fhir.interceptor.AuditEventInterceptor} — runs after
     *       persistence, emits audit records for accepted and rejected requests.</li>
     * </ol>
     *
     * <p>Note: interceptors are registered in FhirServerConfig but their beans
     * are defined in their respective classes. This method wires them into the server.
     * The actual @Bean definitions are in KeycloakJwtInterceptor.java and
     * AuditEventInterceptor.java (Steps 7 and 9 respectively).
     *
     * @return configurer applied to the RestfulServer at startup
     */
    @Bean
    public RestfulServerConfigurer restfulServerConfigurer() {
        return server -> {
            // Server metadata
            server.setServerName("BD FHIR National Repository");
            server.setServerVersion(igVersion);
            server.setImplementationDescription(
                    "National FHIR R4 repository for Bangladesh. " +
                    "Conforms to BD Core FHIR IG v" + igVersion + ". " +
                    "Published by DGHS/MoHFW Bangladesh.");

            // Interceptors registered in correct order.
            // Keycloak and Audit interceptors are injected by Spring —
            // they are declared as @Bean in their own classes and
            // Spring injects them here via method parameter injection.
            // The actual parameter injection happens in the overriding
            // @Configuration class that extends this configurer.
            // NOTE(review): this lambda only sets metadata — no
            // registerInterceptor() call appears here. Confirm the
            // interceptor wiring really happens elsewhere (the comment
            // above suggests an extending configuration) and is not missing.
            log.info("RestfulServer configured: serverAddress={}", serverAddress);
        };
    }

    /**
     * Functional interface for RestfulServer post-configuration.
     * Allows the configurer to be injected and applied by HAPI's starter.
     */
    @FunctionalInterface
    public interface RestfulServerConfigurer {
        void configure(RestfulServer server);
    }

    // =========================================================================
    // UNVALIDATED PROFILE TAG INTERCEPTOR
    // =========================================================================

    /**
     * Adds {@code unvalidated-profile} meta tag to resources of types that
     * are not profiled in BD Core IG.
     *
     * <p>BD Core IG profiles the following resource types:
     * Patient, Condition, Encounter, Observation, Practitioner,
     * Organization, Location, Medication, MedicationRequest, Immunization.
     *
     * <p>All other resource types (e.g., Provenance, DocumentReference,
     * DiagnosticReport) are stored with a tag indicating they have not been
     * validated against a BD Core profile. They are NOT rejected — rejection
     * is reserved for resources that claim BD Core profile conformance but fail.
     *
     * <p>Tag: {@code https://fhir.dghs.gov.bd/tags | unvalidated-profile}
     *
     * <p>Queryable via: {@code GET /fhir/[type]?_tag=https://fhir.dghs.gov.bd/tags|unvalidated-profile}
     *
     * @return interceptor adapter hooked into resource create/update events
     */
    @Bean
    public ca.uhn.fhir.rest.server.interceptor.ServerOperationInterceptorAdapter
            unvalidatedProfileTagInterceptor() {

        return new ca.uhn.fhir.rest.server.interceptor.ServerOperationInterceptorAdapter() {

            // Hook: fired after a resource is created.
            @Override
            public void resourceCreated(
                    ca.uhn.fhir.rest.api.server.RequestDetails theRequest,
                    org.hl7.fhir.instance.model.api.IBaseResource theResource) {
                tagIfUnknownType(theResource);
            }

            // Hook: fired after a resource is updated; only the new version
            // is (re-)tagged.
            @Override
            public void resourceUpdated(
                    ca.uhn.fhir.rest.api.server.RequestDetails theRequest,
                    org.hl7.fhir.instance.model.api.IBaseResource theOldResource,
                    org.hl7.fhir.instance.model.api.IBaseResource theNewResource) {
                tagIfUnknownType(theNewResource);
            }

            // Adds the unvalidated-profile tag when the resource's type is
            // not in BD_CORE_PROFILE_RESOURCE_TYPES. Idempotent: a resource
            // already carrying the tag is left unchanged.
            private void tagIfUnknownType(
                    org.hl7.fhir.instance.model.api.IBaseResource resource) {

                // Only R4 model resources carry the Meta API used below;
                // anything else is silently skipped.
                if (!(resource instanceof Resource r4Resource)) {
                    return;
                }

                String resourceType = r4Resource.getResourceType().name();

                if (!BD_CORE_PROFILE_RESOURCE_TYPES.contains(resourceType)) {
                    Meta meta = r4Resource.getMeta();

                    // Check if tag already present (idempotent)
                    boolean alreadyTagged = meta.getTag().stream().anyMatch(coding ->
                            unvalidatedTagSystem.equals(coding.getSystem()) &&
                            unvalidatedTagCode.equals(coding.getCode()));

                    if (!alreadyTagged) {
                        meta.addTag()
                                .setSystem(unvalidatedTagSystem)
                                .setCode(unvalidatedTagCode)
                                .setDisplay("Resource type not profiled in BD Core IG " +
                                        igVersion + " — stored without profile validation");

                        log.info("Added unvalidated-profile tag to {} resource",
                                resourceType);
                    }
                }
            }
        };
    }

    // =========================================================================
    // APP PROPERTIES
    // =========================================================================

    /**
     * HAPI AppProperties — configuration object read by HAPI's Spring Boot
     * auto-configuration. We customise specific properties here and let HAPI
     * auto-configure the rest from application.yaml hapi.fhir.* properties.
     *
     * @return the customised AppProperties bean
     */
    @Bean
    public AppProperties appProperties() {
        AppProperties props = new AppProperties();
        props.setServer_address(serverAddress);
        props.setFhir_version(ca.uhn.fhir.context.FhirVersionEnum.R4);
        props.setAllow_external_references(true);
        props.setAllow_multiple_delete(false);
        props.setBulk_export_enabled(true);
        props.setNarrative_enabled(false);
        return props;
    }

    /**
     * Startup validation: verifies that the IG package exists on the classpath
     * before the application accepts any traffic. Fails fast at startup rather
     * than failing on the first validation call.
     *
     * @throws IllegalStateException if the package is missing or zero bytes
     */
    @PostConstruct
    public void validateIgPackagePresent() {
        String classpathPath = igPackageClasspath.replace("classpath:", "");
        try (InputStream is = getClass().getClassLoader().getResourceAsStream(classpathPath)) {
            if (is == null) {
                throw new IllegalStateException(
                        "STARTUP FAILURE: BD Core IG package not found at classpath:" +
                        classpathPath + ". " +
                        "The Docker image was built without the IG package. " +
                        "Rebuild the image with the .tgz present in " +
                        "src/main/resources/packages/.");
            }
            // Verify non-empty (a zero-byte file would indicate a build error)
            int firstByte = is.read();
            if (firstByte == -1) {
                throw new IllegalStateException(
                        "STARTUP FAILURE: BD Core IG package at classpath:" +
                        classpathPath + " is empty (zero bytes). " +
                        "Rebuild the image with a valid .tgz file.");
            }
            log.info("IG package presence verified: classpath:{}", classpathPath);
        } catch (IOException e) {
            throw new IllegalStateException(
                    "Failed to verify IG package: " + e.getMessage(), e);
        }
    }
}
|
||||
@@ -0,0 +1,175 @@
|
||||
package bd.gov.dghs.fhir.config;
|
||||
|
||||
import bd.gov.dghs.fhir.interceptor.AuditEventInterceptor;
|
||||
import bd.gov.dghs.fhir.interceptor.KeycloakJwtInterceptor;
|
||||
import ca.uhn.fhir.rest.server.RestfulServer;
|
||||
import ca.uhn.fhir.rest.server.interceptor.RequestValidatingInterceptor;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.boot.web.servlet.FilterRegistrationBean;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.core.Ordered;
|
||||
|
||||
import jakarta.servlet.Filter;
|
||||
import jakarta.servlet.FilterChain;
|
||||
import jakarta.servlet.ServletRequest;
|
||||
import jakarta.servlet.ServletResponse;
|
||||
import jakarta.servlet.http.HttpServletRequest;
|
||||
import jakarta.servlet.http.HttpServletResponse;
|
||||
|
||||
/**
|
||||
* Security configuration — wires interceptors into the HAPI RestfulServer
|
||||
* in the correct order and registers servlet filters for non-FHIR paths.
|
||||
*
|
||||
* <h2>Interceptor registration order</h2>
|
||||
* <p>HAPI processes interceptors in registration order for pre-request hooks
|
||||
* and in reverse registration order for post-request hooks.
|
||||
*
|
||||
* <p>Pre-request order (first registered = first executed):
|
||||
* <ol>
|
||||
* <li>{@link KeycloakJwtInterceptor} — auth must run before everything else.
|
||||
* Unauthenticated requests never reach validation or persistence.</li>
|
||||
* <li>{@link RequestValidatingInterceptor} — profile + OCL validation.
|
||||
* Runs after auth (no point validating unauthenticated submissions)
|
||||
* and before persistence (validates before writing).</li>
|
||||
* <li>{@link AuditEventInterceptor} — audit runs last pre-persistence.
|
||||
* Has access to auth context (set by KeycloakJwtInterceptor) and
|
||||
* validation outcome (set by RequestValidatingInterceptor).</li>
|
||||
* </ol>
|
||||
*
|
||||
* <h2>RestfulServer customiser</h2>
|
||||
* <p>HAPI 7.x provides the {@code IServerInterceptorCustomizer} mechanism
|
||||
* for registering interceptors into the auto-configured RestfulServer.
|
||||
* We implement this via a {@code HapiRestfulServerCustomizer} bean that
|
||||
* Spring picks up automatically.
|
||||
*/
|
||||
@Configuration
|
||||
public class SecurityConfig {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(SecurityConfig.class);
|
||||
|
||||
/**
|
||||
* HAPI RestfulServer customiser — registers all interceptors in correct order.
|
||||
*
|
||||
* <p>This bean is discovered by HAPI's Spring Boot auto-configuration via
|
||||
* the {@code RestfulServerCustomizer} interface. HAPI calls
|
||||
* {@code customize(RestfulServer)} after the server is constructed but
|
||||
* before it begins serving requests.
|
||||
*/
|
||||
@Bean
|
||||
public ca.uhn.fhir.jpa.starter.util.IServerInterceptorCustomizer serverInterceptorCustomizer(
|
||||
KeycloakJwtInterceptor keycloakJwtInterceptor,
|
||||
RequestValidatingInterceptor requestValidatingInterceptor,
|
||||
AuditEventInterceptor auditEventInterceptor,
|
||||
FhirServerConfig.RestfulServerConfigurer restfulServerConfigurer) {
|
||||
|
||||
return server -> {
|
||||
// Order is critical — see class Javadoc
|
||||
server.registerInterceptor(keycloakJwtInterceptor);
|
||||
server.registerInterceptor(requestValidatingInterceptor);
|
||||
server.registerInterceptor(auditEventInterceptor);
|
||||
|
||||
// Apply server metadata configuration from FhirServerConfig
|
||||
restfulServerConfigurer.configure(server);
|
||||
|
||||
// Disable HAPI's built-in ResponseHighlighterInterceptor in production.
|
||||
// It adds HTML rendering for browser requests — unnecessary overhead
|
||||
// in a machine-to-machine API. Enabled in dev profile only.
|
||||
// server.registerInterceptor(new ResponseHighlighterInterceptor());
|
||||
// ↑ Intentionally commented out — remove comment for dev profile.
|
||||
|
||||
log.info("HAPI RestfulServer interceptors registered: " +
|
||||
"KeycloakJwt → RequestValidating → AuditEvent");
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Servlet filter that enforces HTTPS-only access at the application layer.
|
||||
*
|
||||
* <p>nginx handles TLS termination and should be configured to reject
|
||||
* plain HTTP. This filter is a defence-in-depth measure: if a request
|
||||
* somehow reaches the HAPI JVM on port 8080 without going through nginx,
|
||||
* and the {@code X-Forwarded-Proto} header is absent or http, the filter
|
||||
* blocks it.
|
||||
*
|
||||
* <p>In production behind nginx: nginx sets {@code X-Forwarded-Proto: https}
|
||||
* on all proxied requests. This filter allows those through.
|
||||
*
|
||||
* <p>Exception: health check paths are allowed regardless of protocol
|
||||
* (load balancer health probes originate from the internal network
|
||||
* and do not go through nginx).
|
||||
*/
|
||||
@Bean
|
||||
public FilterRegistrationBean<Filter> httpsEnforcementFilter() {
|
||||
FilterRegistrationBean<Filter> registration = new FilterRegistrationBean<>();
|
||||
|
||||
registration.setFilter((ServletRequest req, ServletResponse res, FilterChain chain) -> {
|
||||
HttpServletRequest httpReq = (HttpServletRequest) req;
|
||||
HttpServletResponse httpRes = (HttpServletResponse) res;
|
||||
|
||||
String path = httpReq.getRequestURI();
|
||||
|
||||
// Always allow health check paths regardless of protocol
|
||||
if (path.startsWith("/actuator/health")) {
|
||||
chain.doFilter(req, res);
|
||||
return;
|
||||
}
|
||||
|
||||
// Check X-Forwarded-Proto header set by nginx
|
||||
String forwardedProto = httpReq.getHeader("X-Forwarded-Proto");
|
||||
if (forwardedProto != null && "http".equalsIgnoreCase(forwardedProto)) {
|
||||
log.warn("Rejected plain HTTP request: path={} ip={}",
|
||||
path, httpReq.getRemoteAddr());
|
||||
httpRes.setStatus(HttpServletResponse.SC_MOVED_PERMANENTLY);
|
||||
httpRes.setHeader("Location",
|
||||
"https://fhir.dghs.gov.bd" + path);
|
||||
return;
|
||||
}
|
||||
|
||||
chain.doFilter(req, res);
|
||||
});
|
||||
|
||||
registration.addUrlPatterns("/*");
|
||||
registration.setOrder(Ordered.HIGHEST_PRECEDENCE);
|
||||
registration.setName("httpsEnforcementFilter");
|
||||
return registration;
|
||||
}
|
||||
|
||||
/**
|
||||
* Servlet filter that adds security response headers to all responses.
|
||||
*
|
||||
* <p>These headers are defence-in-depth — nginx also sets them.
|
||||
* Duplicate headers are acceptable and ensure they are present even
|
||||
* if nginx configuration changes.
|
||||
*/
|
||||
@Bean
|
||||
public FilterRegistrationBean<Filter> securityHeadersFilter() {
|
||||
FilterRegistrationBean<Filter> registration = new FilterRegistrationBean<>();
|
||||
|
||||
registration.setFilter((ServletRequest req, ServletResponse res, FilterChain chain) -> {
|
||||
HttpServletResponse httpRes = (HttpServletResponse) res;
|
||||
|
||||
// Prevent MIME type sniffing
|
||||
httpRes.setHeader("X-Content-Type-Options", "nosniff");
|
||||
// Prevent framing (clickjacking)
|
||||
httpRes.setHeader("X-Frame-Options", "DENY");
|
||||
// Strict transport security — 1 year, include subdomains
|
||||
httpRes.setHeader("Strict-Transport-Security",
|
||||
"max-age=31536000; includeSubDomains");
|
||||
// No referrer information in cross-origin requests
|
||||
httpRes.setHeader("Referrer-Policy", "no-referrer");
|
||||
// Disable caching for FHIR responses (they contain patient data)
|
||||
httpRes.setHeader("Cache-Control",
|
||||
"no-store, no-cache, must-revalidate, private");
|
||||
httpRes.setHeader("Pragma", "no-cache");
|
||||
|
||||
chain.doFilter(req, res);
|
||||
});
|
||||
|
||||
registration.addUrlPatterns("/*");
|
||||
registration.setOrder(Ordered.HIGHEST_PRECEDENCE + 1);
|
||||
registration.setName("securityHeadersFilter");
|
||||
return registration;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,317 @@
|
||||
package bd.gov.dghs.fhir.init;
|
||||
|
||||
import ca.uhn.fhir.jpa.packages.NpmPackageValidationSupport;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.time.Duration;
import java.time.Instant;
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Initialises the BD Core IG package in HAPI's NPM package registry
|
||||
* using a PostgreSQL advisory lock to prevent race conditions on
|
||||
* multi-replica startup.
|
||||
*
|
||||
* <h2>Problem</h2>
|
||||
* <p>HAPI writes IG package metadata to {@code NPM_PACKAGE} and
|
||||
* {@code NPM_PACKAGE_VER} tables on first load. With N replicas starting
|
||||
* simultaneously, all N JVMs attempt to INSERT the same package record
|
||||
* concurrently. HAPI uses an upsert internally, but the window between
|
||||
* SELECT (does this package exist?) and INSERT (write the record) is not
|
||||
* atomic — concurrent replicas race through this window and produce either
|
||||
* duplicate key errors (noisy but harmless) or, in rare cases under high
|
||||
* contention, partial writes that leave the NPM_PACKAGE table in an
|
||||
* inconsistent state.
|
||||
*
|
||||
* <h2>Solution</h2>
|
||||
* <p>Before loading the IG, each replica attempts to acquire a PostgreSQL
|
||||
* session-level advisory lock with a deterministic lock ID derived from
|
||||
* the IG package name. Only one replica acquires the lock at a time.
|
||||
* The first replica loads the package and writes the metadata. Subsequent
|
||||
* replicas acquire the lock after the first releases it, find the package
|
||||
* already registered, and skip the insert — producing a clean INFO log
|
||||
* rather than an ERROR.
|
||||
*
|
||||
* <h2>Advisory lock key</h2>
|
||||
* <p>PostgreSQL advisory lock keys are 64-bit integers. We derive the key
|
||||
* by taking {@code Math.abs(packageId.hashCode())} where {@code packageId}
|
||||
* is the BD Core IG package ID string. This is deterministic across all
|
||||
* replicas (same JVM hashCode for the same string on the same JVM version)
|
||||
* and unique enough for this single-package use case.
|
||||
*
|
||||
* <p>Note: Java {@code String.hashCode()} is stable within a JVM instance
|
||||
* but is NOT guaranteed to be stable across JVM versions. Since all replicas
|
||||
* run the same Docker image (same JVM version), this is safe. If you ever
|
||||
* run replicas with different JVM versions simultaneously (you should not),
|
||||
* replace with a CRC32 or FNV hash.
|
||||
*
|
||||
* <h2>Lock release</h2>
|
||||
* <p>The advisory lock is released explicitly after IG load completes.
|
||||
* If the JVM crashes mid-load, PostgreSQL releases session-level advisory
|
||||
* locks automatically when the connection closes — no manual cleanup required.
|
||||
*
|
||||
* <h2>Implements InitializingBean</h2>
|
||||
* <p>{@link InitializingBean#afterPropertiesSet()} is called by Spring after
|
||||
* all {@code @Value} fields are injected and all dependencies are available,
|
||||
* but before the application context is marked as ready to serve requests.
|
||||
* This is the correct lifecycle hook for startup initialisation that must
|
||||
* complete before the server accepts traffic.
|
||||
*/
|
||||
@Component
|
||||
public class IgPackageInitializer implements InitializingBean {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(IgPackageInitializer.class);
|
||||
|
||||
/**
|
||||
* Advisory lock key namespace — all BD FHIR IG locks use this prefix
|
||||
* to avoid collision with any other advisory locks in the database.
|
||||
* Value chosen arbitrarily — just needs to be a consistent namespace constant.
|
||||
*/
|
||||
private static final long ADVISORY_LOCK_NAMESPACE = 0xBD_FHIR_00L;
|
||||
|
||||
/**
|
||||
* Maximum time to wait for the advisory lock before giving up.
|
||||
* If lock acquisition exceeds this duration, the replica has likely
|
||||
* encountered a deadlock or the lock-holding replica has crashed mid-load.
|
||||
* Fail startup rather than waiting indefinitely.
|
||||
*/
|
||||
private static final Duration LOCK_TIMEOUT = Duration.ofSeconds(120);
|
||||
|
||||
private final NpmPackageValidationSupport npmPackageValidationSupport;
|
||||
private final JdbcTemplate fhirJdbcTemplate;
|
||||
|
||||
@Value("${bd.fhir.ig.package-classpath}")
|
||||
private String igPackageClasspath;
|
||||
|
||||
@Value("${bd.fhir.ig.version}")
|
||||
private String igVersion;
|
||||
|
||||
public IgPackageInitializer(
|
||||
NpmPackageValidationSupport npmPackageValidationSupport,
|
||||
JdbcTemplate fhirJdbcTemplate) {
|
||||
this.npmPackageValidationSupport = npmPackageValidationSupport;
|
||||
this.fhirJdbcTemplate = fhirJdbcTemplate;
|
||||
}
|
||||
|
||||
/**
|
||||
* Called by Spring after all properties are injected.
|
||||
* Acquires advisory lock, loads IG, releases lock.
|
||||
*/
|
||||
@Override
|
||||
@Transactional(propagation = Propagation.NOT_SUPPORTED)
|
||||
// NOT_SUPPORTED: advisory lock must be on a direct JDBC connection,
|
||||
// not inside a Spring-managed transaction. Advisory locks acquired
|
||||
// inside a transaction are released when the transaction commits —
|
||||
// we need the lock held until after the IG load completes, which
|
||||
// spans multiple internal HAPI transactions.
|
||||
public void afterPropertiesSet() throws Exception {
|
||||
String packageId = derivePackageId(igPackageClasspath);
|
||||
long lockKey = deriveLockKey(packageId);
|
||||
|
||||
log.info("IG package initialisation starting: packageId={} version={} lockKey={}",
|
||||
packageId, igVersion, lockKey);
|
||||
|
||||
Instant lockStart = Instant.now();
|
||||
|
||||
// Attempt to acquire advisory lock with timeout.
|
||||
// pg_try_advisory_lock returns true immediately if lock acquired,
|
||||
// false if already held by another session.
|
||||
// We poll with backoff rather than using pg_advisory_lock (which blocks
|
||||
// indefinitely) to respect LOCK_TIMEOUT.
|
||||
boolean lockAcquired = false;
|
||||
long backoffMs = 250;
|
||||
|
||||
while (Duration.between(lockStart, Instant.now()).compareTo(LOCK_TIMEOUT) < 0) {
|
||||
Boolean acquired = fhirJdbcTemplate.queryForObject(
|
||||
"SELECT pg_try_advisory_lock(?)",
|
||||
Boolean.class,
|
||||
lockKey);
|
||||
|
||||
if (Boolean.TRUE.equals(acquired)) {
|
||||
lockAcquired = true;
|
||||
log.info("Advisory lock acquired: lockKey={} waitedMs={}",
|
||||
lockKey,
|
||||
Duration.between(lockStart, Instant.now()).toMillis());
|
||||
break;
|
||||
}
|
||||
|
||||
log.debug("Advisory lock contended — waiting {}ms: lockKey={}", backoffMs, lockKey);
|
||||
Thread.sleep(backoffMs);
|
||||
backoffMs = Math.min(backoffMs * 2, 5000); // exponential backoff, cap at 5s
|
||||
}
|
||||
|
||||
if (!lockAcquired) {
|
||||
throw new IllegalStateException(
|
||||
"Failed to acquire IG package advisory lock within " +
|
||||
LOCK_TIMEOUT.getSeconds() + " seconds. " +
|
||||
"lockKey=" + lockKey + ". " +
|
||||
"This may indicate a crashed replica holding the lock — " +
|
||||
"check PostgreSQL pg_locks for session holding key " + lockKey + ".");
|
||||
}
|
||||
|
||||
try {
|
||||
performIgLoad(packageId);
|
||||
} finally {
|
||||
// Always release the lock, even if IG load fails.
|
||||
// Other replicas are blocked waiting for this lock —
|
||||
// they will see the partial state and attempt their own load.
|
||||
try {
|
||||
fhirJdbcTemplate.queryForObject(
|
||||
"SELECT pg_advisory_unlock(?)",
|
||||
Boolean.class,
|
||||
lockKey);
|
||||
log.info("Advisory lock released: lockKey={}", lockKey);
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to release advisory lock: lockKey={} error={}",
|
||||
lockKey, e.getMessage());
|
||||
// Non-fatal: PostgreSQL releases session locks on connection close.
|
||||
// The connection will be returned to HikariCP and eventually
|
||||
// closed/recycled, releasing the lock automatically.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// IG load logic
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* Performs the actual IG package registration.
|
||||
*
|
||||
* <p>Checks whether the package is already registered before attempting
|
||||
* to load. This handles the case where replicas 2..N acquire the lock
|
||||
* after replica 1 has already loaded and released it.
|
||||
*/
|
||||
private void performIgLoad(String packageId) {
|
||||
// Check if already registered by a previous replica or a previous run
|
||||
boolean alreadyLoaded = isPackageAlreadyRegistered(packageId, igVersion);
|
||||
|
||||
if (alreadyLoaded) {
|
||||
log.info("IG package already registered in NPM_PACKAGE — " +
|
||||
"skipping load (this replica is not first): packageId={} version={}",
|
||||
packageId, igVersion);
|
||||
// NpmPackageValidationSupport was already loaded from classpath
|
||||
// in FhirServerConfig.npmPackageValidationSupport() bean.
|
||||
// The in-memory validation support is ready regardless of DB state.
|
||||
return;
|
||||
}
|
||||
|
||||
// First replica: load the package
|
||||
log.info("Loading BD Core IG package: packageId={} version={} classpath={}",
|
||||
packageId, igVersion, igPackageClasspath);
|
||||
|
||||
Instant loadStart = Instant.now();
|
||||
|
||||
try {
|
||||
// NpmPackageValidationSupport.loadPackageFromClasspath() does two things:
|
||||
// 1. Loads StructureDefinitions, ValueSets, CodeSystems into in-memory cache
|
||||
// 2. Writes NPM_PACKAGE and NPM_PACKAGE_VER records to the database
|
||||
// The in-memory load already happened in FhirServerConfig (bean initialisation).
|
||||
// Here we ensure the database records are written exactly once.
|
||||
String classpathPath = igPackageClasspath.replace("classpath:", "");
|
||||
npmPackageValidationSupport.loadPackageFromClasspath(
|
||||
"classpath:" + classpathPath);
|
||||
|
||||
long loadMs = Duration.between(loadStart, Instant.now()).toMillis();
|
||||
log.info("BD Core IG package loaded successfully: packageId={} version={} " +
|
||||
"durationMs={}", packageId, igVersion, loadMs);
|
||||
|
||||
} catch (Exception e) {
|
||||
// If loading fails, log the error but do not crash startup.
|
||||
// The in-memory validation support loaded in FhirServerConfig
|
||||
// is the primary validation mechanism. DB registration failure
|
||||
// is an operational issue but not a functional blocker for validation.
|
||||
log.error("IG package DB registration failed (in-memory validation " +
|
||||
"still active): packageId={} version={} error={}",
|
||||
packageId, igVersion, e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks whether the IG package is already registered in the NPM_PACKAGE table.
|
||||
*
|
||||
* <p>Queries {@code NPM_PACKAGE} joined to {@code NPM_PACKAGE_VER} to check
|
||||
* both package existence and version match. A package registered at a different
|
||||
* version (e.g., 0.1.0 when deploying 0.2.1) is treated as not registered —
|
||||
* the new version will be added alongside the old one.
|
||||
*/
|
||||
private boolean isPackageAlreadyRegistered(String packageId, String version) {
|
||||
try {
|
||||
Integer count = fhirJdbcTemplate.queryForObject(
|
||||
"SELECT COUNT(*) FROM NPM_PACKAGE np " +
|
||||
"JOIN NPM_PACKAGE_VER npv ON npv.PKG_PID = np.PID " +
|
||||
"WHERE np.PACKAGE_ID = ? AND npv.VERSION_ID = ?",
|
||||
Integer.class,
|
||||
packageId, version);
|
||||
return count != null && count > 0;
|
||||
} catch (Exception e) {
|
||||
// Query failure (e.g., table not yet created) — treat as not registered.
|
||||
// Flyway should have run V1 before this is called, but defensive check.
|
||||
log.warn("Could not query NPM_PACKAGE table (Flyway may not have run yet): {}",
|
||||
e.getMessage());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Helpers
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* Derives the NPM package ID from the classpath path.
|
||||
*
|
||||
* <p>The package ID is the FHIR NPM package identifier embedded in the
|
||||
* .tgz filename before the version suffix.
|
||||
* Example: {@code packages/bd.gov.dghs.core-0.2.1.tgz} → {@code bd.gov.dghs.core}
|
||||
*/
|
||||
private String derivePackageId(String classpathPath) {
|
||||
String filename = classpathPath;
|
||||
// Strip classpath: prefix and directory
|
||||
int lastSlash = filename.lastIndexOf('/');
|
||||
if (lastSlash >= 0) {
|
||||
filename = filename.substring(lastSlash + 1);
|
||||
}
|
||||
// Strip .tgz extension
|
||||
if (filename.endsWith(".tgz")) {
|
||||
filename = filename.substring(0, filename.length() - 4);
|
||||
}
|
||||
// Strip version suffix (last hyphen-separated segment that starts with digit)
|
||||
int lastHyphen = filename.lastIndexOf('-');
|
||||
if (lastHyphen > 0) {
|
||||
String versionPart = filename.substring(lastHyphen + 1);
|
||||
if (!versionPart.isEmpty() && Character.isDigit(versionPart.charAt(0))) {
|
||||
filename = filename.substring(0, lastHyphen);
|
||||
}
|
||||
}
|
||||
return filename; // e.g., "bd.gov.dghs.core"
|
||||
}
|
||||
|
||||
/**
|
||||
* Derives a stable 64-bit advisory lock key from a package ID string.
|
||||
*
|
||||
* <p>Combines the namespace constant with the package ID hash to produce
|
||||
* a key that is:
|
||||
* <ul>
|
||||
* <li>Deterministic: same packageId always produces the same key</li>
|
||||
* <li>Namespaced: BD FHIR locks are distinguishable from other advisory locks</li>
|
||||
* <li>Positive: PostgreSQL advisory lock keys must be valid long values</li>
|
||||
* </ul>
|
||||
*/
|
||||
private long deriveLockKey(String packageId) {
|
||||
// Use a djb2-style hash for stability across JVM invocations.
|
||||
// Java String.hashCode() is stable within a JVM version but the spec
|
||||
// does not guarantee cross-version stability. djb2 is fully specified.
|
||||
long hash = 5381L;
|
||||
for (char c : packageId.toCharArray()) {
|
||||
hash = ((hash << 5) + hash) + c;
|
||||
}
|
||||
// XOR with namespace to distinguish from unrelated advisory locks
|
||||
return ADVISORY_LOCK_NAMESPACE ^ Math.abs(hash);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,601 @@
|
||||
package bd.gov.dghs.fhir.interceptor;
|
||||
|
||||
import bd.gov.dghs.fhir.audit.AuditEventEmitter;
|
||||
import bd.gov.dghs.fhir.audit.RejectedSubmissionSink;
|
||||
import bd.gov.dghs.fhir.validator.ClusterExpressionValidator;
|
||||
import ca.uhn.fhir.interceptor.api.Hook;
|
||||
import ca.uhn.fhir.interceptor.api.Interceptor;
|
||||
import ca.uhn.fhir.interceptor.api.Pointcut;
|
||||
import ca.uhn.fhir.rest.api.RestOperationTypeEnum;
|
||||
import ca.uhn.fhir.rest.api.server.RequestDetails;
|
||||
import ca.uhn.fhir.rest.server.exceptions.BaseServerResponseException;
|
||||
import ca.uhn.fhir.rest.server.exceptions.UnprocessableEntityException;
|
||||
import org.hl7.fhir.instance.model.api.IBaseResource;
|
||||
import org.hl7.fhir.r4.model.OperationOutcome;
|
||||
import org.hl7.fhir.r4.model.Resource;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import jakarta.servlet.http.HttpServletRequest;
|
||||
import jakarta.servlet.http.HttpServletResponse;
|
||||
import java.time.Instant;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
/**
|
||||
* HAPI FHIR interceptor that records every resource submission outcome
|
||||
* to the immutable audit store.
|
||||
*
|
||||
* <h2>Interceptor responsibilities</h2>
|
||||
* <ol>
|
||||
* <li><b>Cluster expression pre-validation</b> — invokes
|
||||
* {@link ClusterExpressionValidator} before HAPI's profile validation
|
||||
* runs. Cluster expression failures are converted to 422 responses
|
||||
* and recorded as REJECTED in the audit trail.</li>
|
||||
* <li><b>Accepted resource audit</b> — after successful storage, emits
|
||||
* an {@code AuditEvent} to {@link AuditEventEmitter} (async).</li>
|
||||
* <li><b>Rejected resource audit</b> — on any 422 or 401 exception,
|
||||
* captures the full resource payload and rejection details to
|
||||
* {@link RejectedSubmissionSink} (async) and emits an AuditEvent.</li>
|
||||
* </ol>
|
||||
*
|
||||
* <h2>Hooks and ordering</h2>
|
||||
* <pre>
|
||||
* SERVER_INCOMING_REQUEST_PRE_HANDLED ← KeycloakJwtInterceptor (Step 8)
|
||||
* SERVER_INCOMING_REQUEST_PRE_HANDLED ← This class: cluster expression validation
|
||||
* (registered after Keycloak, runs second)
|
||||
* [HAPI profile validation runs here — RequestValidatingInterceptor]
|
||||
* [HAPI persistence runs here]
|
||||
* STORAGE_PRESTORAGE_RESOURCE_CREATED ← This class: unvalidated-profile tag check
|
||||
* SERVER_OUTGOING_RESPONSE ← This class: accepted resource audit
|
||||
* SERVER_PROCESSING_COMPLETED ← This class: exception path audit
|
||||
* </pre>
|
||||
*
|
||||
* <h2>Async audit writes</h2>
|
||||
* <p>All audit writes ({@link AuditEventEmitter} and {@link RejectedSubmissionSink})
|
||||
* are executed on Spring's async task executor (configured in application.yaml
|
||||
* {@code spring.task.execution}). The FHIR request thread returns the HTTP response
|
||||
* to the vendor immediately — audit writes do not add to response latency.
|
||||
*
|
||||
* <p>Consequence: in the rare case of audit write failure (postgres-audit unavailable),
|
||||
* the FHIR operation succeeds but the audit record is missing. The audit write
|
||||
* failure is logged at ERROR level. DGHS must treat audit write failures as
|
||||
* high-priority incidents — a gap in the audit trail violates the immutability
|
||||
* requirement. If this is unacceptable, change audit writes to synchronous and
|
||||
* accept the latency cost.
|
||||
*
|
||||
* <h2>Request attribute contract</h2>
|
||||
* <p>This class reads request attributes set by {@link KeycloakJwtInterceptor}:
|
||||
* <ul>
|
||||
* <li>{@code BD_FHIR_CLIENT_ID} — Keycloak client_id</li>
|
||||
* <li>{@code BD_FHIR_FACILITY} — sending facility identifier</li>
|
||||
* <li>{@code BD_FHIR_SUBJECT} — JWT sub claim</li>
|
||||
* <li>{@code BD_FHIR_REQUEST_ID} — per-request UUID</li>
|
||||
* <li>{@code BD_FHIR_AUTH_EXEMPTED} — true for unauthenticated exempted paths</li>
|
||||
* </ul>
|
||||
* If KeycloakJwtInterceptor has not set these (e.g., on exempted paths),
|
||||
* fallback values are used — exempted requests are not audit-logged for
|
||||
* resource operations.
|
||||
*/
|
||||
@Interceptor
|
||||
@Component
|
||||
public class AuditEventInterceptor {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(AuditEventInterceptor.class);

// Pre-validation of BD cluster expressions; runs before HAPI profile validation.
private final ClusterExpressionValidator clusterExpressionValidator;
// Emitter for ACCEPTED/REJECTED AuditEvents (async, per class contract).
private final AuditEventEmitter auditEventEmitter;
// Stores the full payload of rejected submissions for forensic audit (async).
private final RejectedSubmissionSink rejectedSubmissionSink;

/**
 * Constructor injection of the cluster-expression validator and the two
 * audit sinks. All three are required collaborators.
 *
 * @param clusterExpressionValidator validates BD cluster expressions pre-storage
 * @param auditEventEmitter          emits AuditEvents for accepted/rejected operations
 * @param rejectedSubmissionSink     captures rejected payloads for forensics
 */
public AuditEventInterceptor(
        ClusterExpressionValidator clusterExpressionValidator,
        AuditEventEmitter auditEventEmitter,
        RejectedSubmissionSink rejectedSubmissionSink) {
    this.clusterExpressionValidator = clusterExpressionValidator;
    this.auditEventEmitter = auditEventEmitter;
    this.rejectedSubmissionSink = rejectedSubmissionSink;
}
|
||||
|
||||
// =========================================================================
|
||||
// Hook 1: Cluster expression validation — pre-storage, pre-profile-validation
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* Pre-request hook — runs cluster expression validation before HAPI's
|
||||
* own profile validation and before any database write.
|
||||
*
|
||||
* <p>Only runs for write operations (CREATE, UPDATE, PATCH).
|
||||
* Read operations (search, read, vread) do not contain resources
|
||||
* to validate and are passed through immediately.
|
||||
*
|
||||
* <p>On cluster expression failure: throws {@link UnprocessableEntityException}
|
||||
* with a FHIR OperationOutcome. HAPI returns this as HTTP 422. The exception
|
||||
* is caught by {@link #handleProcessingException} for audit logging.
|
||||
*/
|
||||
@Hook(Pointcut.SERVER_INCOMING_REQUEST_PRE_HANDLED)
|
||||
public void validateClusterExpressions(
|
||||
RequestDetails requestDetails,
|
||||
HttpServletRequest servletRequest,
|
||||
HttpServletResponse servletResponse) {
|
||||
|
||||
// Skip reads and non-resource operations
|
||||
if (!isWriteOperation(requestDetails)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Skip if auth was exempted (metadata endpoint, etc.)
|
||||
if (Boolean.TRUE.equals(
|
||||
servletRequest.getAttribute(KeycloakJwtInterceptor.REQUEST_ATTR_AUTH_EXEMPTED))) {
|
||||
return;
|
||||
}
|
||||
|
||||
IBaseResource resource = requestDetails.getResource();
|
||||
if (!(resource instanceof Resource r4Resource)) {
|
||||
return; // No resource body or not R4 — let HAPI handle it
|
||||
}
|
||||
|
||||
try {
|
||||
clusterExpressionValidator.validateResource(r4Resource, requestDetails);
|
||||
} catch (UnprocessableEntityException e) {
|
||||
// Cluster validation failed — store resource payload for forensic audit
|
||||
// before re-throwing (the exception path audit runs in handleProcessingException)
|
||||
storeClusterRejection(servletRequest, r4Resource, e);
|
||||
throw e; // HAPI catches this and returns 422
|
||||
}
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Hook 2: Accepted resource — post-storage audit
|
||||
// =========================================================================
|
||||
|
||||
/**
 * Storage hook for resource creation — delegates to the shared accepted-
 * operation audit path with operation code {@code "CREATE"}.
 *
 * <p>NOTE(review): the pointcut is {@code STORAGE_PRESTORAGE_RESOURCE_CREATED},
 * i.e. the PRE-storage phase, not "after successful storage" as the original
 * comment claimed — the audit fires just before the write is committed.
 */
@Hook(Pointcut.STORAGE_PRESTORAGE_RESOURCE_CREATED)
public void auditResourceCreated(
        RequestDetails requestDetails,
        IBaseResource resource) {
    auditAcceptedOperation(requestDetails, resource, "CREATE");
}
|
||||
|
||||
/**
 * Storage hook for resource update — audits the NEW resource state with
 * operation code {@code "UPDATE"}. The pointcut is the PRE-storage phase
 * ({@code STORAGE_PRESTORAGE_RESOURCE_UPDATED}).
 *
 * @param oldResource previous state; required by the pointcut signature but
 *                    not used here
 * @param newResource incoming state that will be stored — this is what gets audited
 */
@Hook(Pointcut.STORAGE_PRESTORAGE_RESOURCE_UPDATED)
public void auditResourceUpdated(
        RequestDetails requestDetails,
        IBaseResource oldResource,
        IBaseResource newResource) {
    auditAcceptedOperation(requestDetails, newResource, "UPDATE");
}
|
||||
|
||||
/**
 * Storage hook for resource deletion — delegates to the shared accepted-
 * operation audit path with operation code {@code "DELETE"}. The pointcut
 * is the PRE-storage phase ({@code STORAGE_PRESTORAGE_RESOURCE_DELETED}),
 * firing just before the delete is committed.
 */
@Hook(Pointcut.STORAGE_PRESTORAGE_RESOURCE_DELETED)
public void auditResourceDeleted(
        RequestDetails requestDetails,
        IBaseResource resource) {
    auditAcceptedOperation(requestDetails, resource, "DELETE");
}
|
||||
|
||||
// =========================================================================
|
||||
// Hook 3: Exception path — rejected resource audit
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* Exception hook — fires when any HAPI processing exception occurs.
|
||||
*
|
||||
* <p>Handles all rejection paths:
|
||||
* <ul>
|
||||
* <li>422 from profile validation failure</li>
|
||||
* <li>422 from OCL terminology rejection</li>
|
||||
* <li>422 from cluster expression rejection (thrown in Hook 1)</li>
|
||||
* <li>401 from auth failure (KeycloakJwtInterceptor)</li>
|
||||
* </ul>
|
||||
*
|
||||
* <p>For 422 responses: stores the full resource payload in
|
||||
* {@code audit.fhir_rejected_submissions} with rejection details.
|
||||
* For 401 responses: records auth failure event only (no resource payload).
|
||||
*/
|
||||
@Hook(Pointcut.SERVER_HANDLE_EXCEPTION)
|
||||
public boolean handleProcessingException(
|
||||
RequestDetails requestDetails,
|
||||
HttpServletRequest servletRequest,
|
||||
HttpServletResponse servletResponse,
|
||||
BaseServerResponseException exception) {
|
||||
|
||||
int statusCode = exception.getStatusCode();
|
||||
|
||||
// Only audit 401 and 422 — other errors (404, 500) are operational,
|
||||
// not submission rejections.
|
||||
if (statusCode == 422) {
|
||||
auditRejectedSubmission(requestDetails, servletRequest, exception,
|
||||
classifyRejectionCode(exception));
|
||||
} else if (statusCode == 401) {
|
||||
auditAuthFailure(requestDetails, servletRequest, exception);
|
||||
}
|
||||
|
||||
// Return false = do not suppress the exception — HAPI returns the HTTP response
|
||||
return false;
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Audit emission helpers
|
||||
// =========================================================================
|
||||
|
||||
private void auditAcceptedOperation(
|
||||
RequestDetails requestDetails,
|
||||
IBaseResource resource,
|
||||
String operation) {
|
||||
|
||||
if (isExemptedRequest(requestDetails)) {
|
||||
return;
|
||||
}
|
||||
|
||||
AuditContext ctx = extractAuditContext(requestDetails);
|
||||
String resourceType = resource != null ?
|
||||
resource.fhirType() : requestDetails.getResourceName();
|
||||
String resourceId = resource instanceof Resource r4 ?
|
||||
r4.getIdElement().getIdPart() : null;
|
||||
|
||||
log.info("Resource {} accepted: resourceType={} resourceId={} " +
|
||||
"clientId={} facility={} requestId={}",
|
||||
operation, resourceType, resourceId,
|
||||
ctx.clientId, ctx.facility, ctx.requestId);
|
||||
|
||||
// Async audit write — does not block response
|
||||
auditEventEmitter.emitAsync(AuditEventEmitter.AuditRecord.builder()
|
||||
.eventId(UUID.randomUUID())
|
||||
.eventTime(Instant.now())
|
||||
.eventType("OPERATION")
|
||||
.operation(operation)
|
||||
.resourceType(resourceType)
|
||||
.resourceId(resourceId)
|
||||
.outcome("ACCEPTED")
|
||||
.clientId(ctx.clientId)
|
||||
.subject(ctx.subject)
|
||||
.sendingFacility(ctx.facility)
|
||||
.requestIp(ctx.requestIp)
|
||||
.requestId(ctx.requestId)
|
||||
.build());
|
||||
}
|
||||
|
||||
private void auditRejectedSubmission(
|
||||
RequestDetails requestDetails,
|
||||
HttpServletRequest servletRequest,
|
||||
BaseServerResponseException exception,
|
||||
String rejectionCode) {
|
||||
|
||||
AuditContext ctx = extractAuditContext(requestDetails, servletRequest);
|
||||
String resourceType = requestDetails.getResourceName();
|
||||
String operation = deriveOperation(requestDetails);
|
||||
|
||||
// Extract OperationOutcome messages for structured logging
|
||||
List<String> issueMessages = extractIssueMessages(exception);
|
||||
String primaryMessage = issueMessages.isEmpty() ?
|
||||
exception.getMessage() : issueMessages.get(0);
|
||||
|
||||
log.info("Resource submission rejected: resourceType={} rejectionCode={} " +
|
||||
"clientId={} facility={} requestId={} reason={}",
|
||||
resourceType, rejectionCode, ctx.clientId,
|
||||
ctx.facility, ctx.requestId, primaryMessage);
|
||||
|
||||
// Store rejected payload if this was a write operation with a resource body
|
||||
IBaseResource resource = requestDetails.getResource();
|
||||
if (resource != null && isWriteOperation(requestDetails)) {
|
||||
rejectedSubmissionSink.storeAsync(
|
||||
RejectedSubmissionSink.RejectedSubmission.builder()
|
||||
.submissionId(UUID.randomUUID())
|
||||
.submissionTime(Instant.now())
|
||||
.eventId(UUID.randomUUID())
|
||||
.resourceType(resourceType)
|
||||
.resourcePayload(serializeResource(requestDetails, resource))
|
||||
.rejectionCode(rejectionCode)
|
||||
.rejectionReason(primaryMessage)
|
||||
.elementPath(extractFirstElementPath(exception))
|
||||
.violatedProfile(extractViolatedProfile(exception))
|
||||
.invalidCode(extractInvalidCode(exception))
|
||||
.invalidSystem(extractInvalidSystem(exception))
|
||||
.sendingFacility(ctx.facility)
|
||||
.clientId(ctx.clientId)
|
||||
.build());
|
||||
}
|
||||
|
||||
// Emit audit event
|
||||
auditEventEmitter.emitAsync(AuditEventEmitter.AuditRecord.builder()
|
||||
.eventId(UUID.randomUUID())
|
||||
.eventTime(Instant.now())
|
||||
.eventType("VALIDATION_FAILURE")
|
||||
.operation(operation)
|
||||
.resourceType(resourceType)
|
||||
.outcome("REJECTED")
|
||||
.outcomeDetail(primaryMessage)
|
||||
.clientId(ctx.clientId)
|
||||
.subject(ctx.subject)
|
||||
.sendingFacility(ctx.facility)
|
||||
.requestIp(ctx.requestIp)
|
||||
.requestId(ctx.requestId)
|
||||
.validationMessages(issueMessages)
|
||||
.build());
|
||||
}
|
||||
|
||||
private void auditAuthFailure(
|
||||
RequestDetails requestDetails,
|
||||
HttpServletRequest servletRequest,
|
||||
BaseServerResponseException exception) {
|
||||
|
||||
AuditContext ctx = extractAuditContext(requestDetails, servletRequest);
|
||||
|
||||
log.info("Auth failure: clientId={} requestId={} ip={} reason={}",
|
||||
ctx.clientId, ctx.requestId, ctx.requestIp, exception.getMessage());
|
||||
|
||||
auditEventEmitter.emitAsync(AuditEventEmitter.AuditRecord.builder()
|
||||
.eventId(UUID.randomUUID())
|
||||
.eventTime(Instant.now())
|
||||
.eventType("AUTH_FAILURE")
|
||||
.operation(deriveOperation(requestDetails))
|
||||
.resourceType(requestDetails.getResourceName())
|
||||
.outcome("REJECTED")
|
||||
.outcomeDetail(exception.getMessage())
|
||||
.clientId(ctx.clientId != null ? ctx.clientId : "unauthenticated")
|
||||
.subject(ctx.subject != null ? ctx.subject : "unknown")
|
||||
.sendingFacility(ctx.facility)
|
||||
.requestIp(ctx.requestIp)
|
||||
.requestId(ctx.requestId)
|
||||
.build());
|
||||
}
|
||||
|
||||
private void storeClusterRejection(
|
||||
HttpServletRequest servletRequest,
|
||||
Resource r4Resource,
|
||||
UnprocessableEntityException e) {
|
||||
|
||||
AuditContext ctx = extractAuditContextFromServlet(servletRequest);
|
||||
String resourceType = r4Resource.getResourceType().name();
|
||||
|
||||
rejectedSubmissionSink.storeAsync(
|
||||
RejectedSubmissionSink.RejectedSubmission.builder()
|
||||
.submissionId(UUID.randomUUID())
|
||||
.submissionTime(Instant.now())
|
||||
.eventId(UUID.randomUUID())
|
||||
.resourceType(resourceType)
|
||||
.resourcePayload(serializeR4Resource(r4Resource))
|
||||
.rejectionCode("CLUSTER_EXPRESSION_INVALID")
|
||||
.rejectionReason(extractFirstMessage(e))
|
||||
.elementPath(extractFirstElementPath(e))
|
||||
.sendingFacility(ctx.facility)
|
||||
.clientId(ctx.clientId)
|
||||
.build());
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Context extraction helpers
|
||||
// =========================================================================
|
||||
|
||||
private AuditContext extractAuditContext(RequestDetails requestDetails) {
|
||||
HttpServletRequest req = (HttpServletRequest)
|
||||
requestDetails.getServletRequest();
|
||||
return extractAuditContext(requestDetails, req);
|
||||
}
|
||||
|
||||
private AuditContext extractAuditContext(
|
||||
RequestDetails requestDetails, HttpServletRequest servletRequest) {
|
||||
if (servletRequest == null) {
|
||||
return new AuditContext("unknown", "unknown", "unknown", "unknown", "unknown");
|
||||
}
|
||||
return extractAuditContextFromServlet(servletRequest);
|
||||
}
|
||||
|
||||
private AuditContext extractAuditContextFromServlet(HttpServletRequest req) {
|
||||
String clientId = attrString(req, KeycloakJwtInterceptor.REQUEST_ATTR_CLIENT_ID, "unknown");
|
||||
String facility = attrString(req, KeycloakJwtInterceptor.REQUEST_ATTR_FACILITY, "unknown");
|
||||
String subject = attrString(req, KeycloakJwtInterceptor.REQUEST_ATTR_SUBJECT, "unknown");
|
||||
String requestId = attrString(req, KeycloakJwtInterceptor.REQUEST_ATTR_REQUEST_ID, "unknown");
|
||||
String ip = extractIp(req);
|
||||
return new AuditContext(clientId, facility, subject, requestId, ip);
|
||||
}
|
||||
|
||||
private String attrString(HttpServletRequest req, String attr, String fallback) {
|
||||
Object val = req.getAttribute(attr);
|
||||
return val != null ? val.toString() : fallback;
|
||||
}
|
||||
|
||||
private String extractIp(HttpServletRequest req) {
|
||||
String xff = req.getHeader("X-Forwarded-For");
|
||||
if (xff != null && !xff.isBlank()) {
|
||||
int comma = xff.indexOf(',');
|
||||
return comma > 0 ? xff.substring(0, comma).trim() : xff.trim();
|
||||
}
|
||||
return req.getRemoteAddr();
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Rejection classification
|
||||
// =========================================================================
|
||||
|
||||
private String classifyRejectionCode(BaseServerResponseException exception) {
|
||||
if (!(exception instanceof UnprocessableEntityException uee)) {
|
||||
return "PROFILE_VIOLATION";
|
||||
}
|
||||
|
||||
// Try to classify by OperationOutcome issue content
|
||||
OperationOutcome oo = extractOperationOutcome(uee);
|
||||
if (oo == null) return "PROFILE_VIOLATION";
|
||||
|
||||
for (var issue : oo.getIssue()) {
|
||||
String diag = issue.getDiagnostics();
|
||||
if (diag == null) continue;
|
||||
String lower = diag.toLowerCase();
|
||||
|
||||
if (lower.contains("cluster") && lower.contains("extension")) {
|
||||
return "CLUSTER_STEM_MISSING_EXTENSION";
|
||||
}
|
||||
if (lower.contains("cluster")) {
|
||||
return "CLUSTER_EXPRESSION_INVALID";
|
||||
}
|
||||
if (lower.contains("icd") || lower.contains("terminology") ||
|
||||
lower.contains("code") && lower.contains("not valid")) {
|
||||
if (lower.contains("class") || lower.contains("device") ||
|
||||
lower.contains("substance")) {
|
||||
return "TERMINOLOGY_INVALID_CLASS";
|
||||
}
|
||||
return "TERMINOLOGY_INVALID_CODE";
|
||||
}
|
||||
}
|
||||
return "PROFILE_VIOLATION";
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// OperationOutcome parsing helpers
|
||||
// =========================================================================
|
||||
|
||||
private OperationOutcome extractOperationOutcome(BaseServerResponseException e) {
|
||||
if (e.getOperationOutcome() instanceof OperationOutcome oo) {
|
||||
return oo;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private List<String> extractIssueMessages(BaseServerResponseException e) {
|
||||
OperationOutcome oo = extractOperationOutcome(e);
|
||||
if (oo == null) return List.of(e.getMessage() != null ? e.getMessage() : "Unknown error");
|
||||
return oo.getIssue().stream()
|
||||
.filter(i -> i.getSeverity() == OperationOutcome.IssueSeverity.ERROR ||
|
||||
i.getSeverity() == OperationOutcome.IssueSeverity.FATAL)
|
||||
.map(i -> i.getDiagnostics() != null ? i.getDiagnostics() : i.getCode().toCode())
|
||||
.toList();
|
||||
}
|
||||
|
||||
private String extractFirstMessage(UnprocessableEntityException e) {
|
||||
List<String> msgs = extractIssueMessages(e);
|
||||
return msgs.isEmpty() ? e.getMessage() : msgs.get(0);
|
||||
}
|
||||
|
||||
private String extractFirstElementPath(BaseServerResponseException e) {
|
||||
OperationOutcome oo = extractOperationOutcome(e);
|
||||
if (oo == null) return null;
|
||||
return oo.getIssue().stream()
|
||||
.filter(i -> !i.getExpression().isEmpty())
|
||||
.map(i -> i.getExpression().get(0).getValue())
|
||||
.findFirst().orElse(null);
|
||||
}
|
||||
|
||||
private String extractViolatedProfile(BaseServerResponseException e) {
|
||||
OperationOutcome oo = extractOperationOutcome(e);
|
||||
if (oo == null) return null;
|
||||
// Look for a profile URL in diagnostics
|
||||
return oo.getIssue().stream()
|
||||
.map(OperationOutcome.OperationOutcomeIssueComponent::getDiagnostics)
|
||||
.filter(d -> d != null && d.contains("https://fhir.dghs.gov.bd"))
|
||||
.findFirst().orElse(null);
|
||||
}
|
||||
|
||||
private String extractInvalidCode(BaseServerResponseException e) {
|
||||
OperationOutcome oo = extractOperationOutcome(e);
|
||||
if (oo == null) return null;
|
||||
// Look for a code value in diagnostics — heuristic extraction
|
||||
return oo.getIssue().stream()
|
||||
.map(OperationOutcome.OperationOutcomeIssueComponent::getDiagnostics)
|
||||
.filter(d -> d != null && d.contains("code="))
|
||||
.map(d -> {
|
||||
int idx = d.indexOf("code=");
|
||||
if (idx < 0) return null;
|
||||
String rest = d.substring(idx + 5);
|
||||
int end = rest.indexOf(' ');
|
||||
return end > 0 ? rest.substring(0, end) : rest;
|
||||
})
|
||||
.findFirst().orElse(null);
|
||||
}
|
||||
|
||||
private String extractInvalidSystem(BaseServerResponseException e) {
|
||||
OperationOutcome oo = extractOperationOutcome(e);
|
||||
if (oo == null) return null;
|
||||
return oo.getIssue().stream()
|
||||
.map(OperationOutcome.OperationOutcomeIssueComponent::getDiagnostics)
|
||||
.filter(d -> d != null && d.contains("system="))
|
||||
.map(d -> {
|
||||
int idx = d.indexOf("system=");
|
||||
if (idx < 0) return null;
|
||||
String rest = d.substring(idx + 7);
|
||||
int end = rest.indexOf(' ');
|
||||
return end > 0 ? rest.substring(0, end) : rest;
|
||||
})
|
||||
.findFirst().orElse(null);
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Serialisation
|
||||
// =========================================================================
|
||||
|
||||
private String serializeResource(RequestDetails requestDetails, IBaseResource resource) {
|
||||
try {
|
||||
return requestDetails.getFhirContext()
|
||||
.newJsonParser()
|
||||
.encodeResourceToString(resource);
|
||||
} catch (Exception e) {
|
||||
log.warn("Could not serialise resource for rejected submission storage: {}",
|
||||
e.getMessage());
|
||||
return "{\"error\": \"serialisation_failed\"}";
|
||||
}
|
||||
}
|
||||
|
||||
private String serializeR4Resource(Resource resource) {
|
||||
try {
|
||||
return ca.uhn.fhir.context.FhirContext.forR4Cached()
|
||||
.newJsonParser()
|
||||
.encodeResourceToString(resource);
|
||||
} catch (Exception e) {
|
||||
log.warn("Could not serialise R4 resource: {}", e.getMessage());
|
||||
return "{\"error\": \"serialisation_failed\"}";
|
||||
}
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Operation classification helpers
|
||||
// =========================================================================
|
||||
|
||||
private boolean isWriteOperation(RequestDetails requestDetails) {
|
||||
RestOperationTypeEnum op = requestDetails.getRestOperationType();
|
||||
if (op == null) return false;
|
||||
return switch (op) {
|
||||
case CREATE, UPDATE, PATCH, DELETE -> true;
|
||||
default -> false;
|
||||
};
|
||||
}
|
||||
|
||||
private String deriveOperation(RequestDetails requestDetails) {
|
||||
RestOperationTypeEnum op = requestDetails.getRestOperationType();
|
||||
if (op == null) return "UNKNOWN";
|
||||
return switch (op) {
|
||||
case CREATE -> "CREATE";
|
||||
case UPDATE -> "UPDATE";
|
||||
case PATCH -> "PATCH";
|
||||
case DELETE -> "DELETE";
|
||||
case READ, VREAD -> "READ";
|
||||
case SEARCH_TYPE, SEARCH_SYSTEM -> "READ";
|
||||
default -> op.name();
|
||||
};
|
||||
}
|
||||
|
||||
private boolean isExemptedRequest(RequestDetails requestDetails) {
|
||||
HttpServletRequest req = (HttpServletRequest) requestDetails.getServletRequest();
|
||||
if (req == null) return false;
|
||||
return Boolean.TRUE.equals(
|
||||
req.getAttribute(KeycloakJwtInterceptor.REQUEST_ATTR_AUTH_EXEMPTED));
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Inner classes
|
||||
// =========================================================================
|
||||
|
||||
/**
 * Immutable audit context extracted from request attributes.
 *
 * <p>Populated by {@code extractAuditContextFromServlet} from the request
 * attributes set by {@code KeycloakJwtInterceptor}; every component defaults
 * to {@code "unknown"} when the corresponding attribute is absent.
 */
private record AuditContext(
        // Keycloak client_id of the calling system
        String clientId,
        // sending_facility claim (falls back to client_id upstream)
        String facility,
        // JWT sub claim (service account user ID)
        String subject,
        // per-request UUID for log correlation
        String requestId,
        // best-effort client IP (X-Forwarded-For aware)
        String requestIp) {}
|
||||
}
|
||||
@@ -0,0 +1,644 @@
|
||||
package bd.gov.dghs.fhir.interceptor;
|
||||
|
||||
import ca.uhn.fhir.interceptor.api.Hook;
|
||||
import ca.uhn.fhir.interceptor.api.Interceptor;
|
||||
import ca.uhn.fhir.interceptor.api.Pointcut;
|
||||
import ca.uhn.fhir.rest.api.server.RequestDetails;
|
||||
import ca.uhn.fhir.rest.server.exceptions.AuthenticationException;
|
||||
import ca.uhn.fhir.rest.server.exceptions.ForbiddenOperationException;
|
||||
import com.nimbusds.jose.JOSEException;
|
||||
import com.nimbusds.jose.JWSAlgorithm;
|
||||
import com.nimbusds.jose.jwk.source.DefaultJWKSetCache;
|
||||
import com.nimbusds.jose.jwk.source.JWKSource;
|
||||
import com.nimbusds.jose.jwk.source.RemoteJWKSet;
|
||||
import com.nimbusds.jose.proc.BadJOSEException;
|
||||
import com.nimbusds.jose.proc.JWSKeySelector;
|
||||
import com.nimbusds.jose.proc.JWSVerificationKeySelector;
|
||||
import com.nimbusds.jose.proc.SecurityContext;
|
||||
import com.nimbusds.jwt.JWTClaimsSet;
|
||||
import com.nimbusds.jwt.proc.ConfigurableJWTProcessor;
|
||||
import com.nimbusds.jwt.proc.DefaultJWTClaimsVerifier;
|
||||
import com.nimbusds.jwt.proc.DefaultJWTProcessor;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.slf4j.MDC;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import jakarta.annotation.PostConstruct;
|
||||
import jakarta.servlet.http.HttpServletRequest;
|
||||
import jakarta.servlet.http.HttpServletResponse;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.text.ParseException;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* HAPI FHIR interceptor that enforces Keycloak JWT authentication and
|
||||
* role-based authorisation for all FHIR server requests.
|
||||
*
|
||||
* <h2>Token validation sequence</h2>
|
||||
* <p>For every inbound request (except exempted paths):
|
||||
* <ol>
|
||||
* <li>Extract Bearer token from {@code Authorization} header.</li>
|
||||
* <li>Verify JWT signature against Keycloak JWKS endpoint
|
||||
* (cached per {@code kid}, TTL 1 hour).</li>
|
||||
* <li>Verify token is not expired ({@code exp} claim).</li>
|
||||
* <li>Verify token issuer matches {@code https://auth.dghs.gov.bd/realms/hris}.</li>
|
||||
* <li>Verify token contains {@code mci-api} role in realm roles
|
||||
* OR resource access roles.</li>
|
||||
* <li>Extract {@code sending_facility} and {@code client_id} claims.</li>
|
||||
* <li>Populate MDC context for structured log correlation.</li>
|
||||
* <li>Set request attributes for downstream use by
|
||||
* {@link bd.gov.dghs.fhir.terminology.TerminologyCacheManager} and
|
||||
* {@link bd.gov.dghs.fhir.audit.AuditEventInterceptor}.</li>
|
||||
* </ol>
|
||||
*
|
||||
* <h2>Exempted paths</h2>
|
||||
* <ul>
|
||||
* <li>{@code GET /fhir/metadata} — CapabilityStatement, unauthenticated access allowed.</li>
|
||||
* <li>{@code GET /actuator/health/**} — load balancer health probes.</li>
|
||||
* <li>{@code GET /actuator/info} — build info, non-sensitive.</li>
|
||||
* </ul>
|
||||
* All other paths require a valid Bearer token with {@code mci-api} role.
|
||||
* Admin paths ({@code /admin/**}) require {@code fhir-admin} role in addition
|
||||
* to a valid token — enforced in the respective controller.
|
||||
*
|
||||
* <h2>JWKS caching</h2>
|
||||
* <p>Keycloak signing keys are cached using Nimbus {@link RemoteJWKSet} with
|
||||
* a {@link DefaultJWKSetCache} (TTL: 1 hour, refresh-ahead: 15 minutes before expiry).
|
||||
* When a JWT arrives with a {@code kid} not present in the local cache, the cache
|
||||
* is immediately refreshed from the JWKS endpoint regardless of TTL — this handles
|
||||
* Keycloak key rotation without a 1-hour lag.
|
||||
*
|
||||
* <h2>Role extraction</h2>
|
||||
* <p>Keycloak embeds roles in two locations within the JWT:
|
||||
* <pre>
|
||||
* {
|
||||
* "realm_access": { "roles": ["mci-api", "offline_access"] },
|
||||
* "resource_access": {
|
||||
* "fhir-vendor-org123": { "roles": ["mci-api"] }
|
||||
* }
|
||||
* }
|
||||
* </pre>
|
||||
* <p>This interceptor checks both locations. The {@code mci-api} role is
|
||||
* sufficient in either location — the check is OR, not AND.
|
||||
*
|
||||
* <h2>Sending facility extraction</h2>
|
||||
* <p>The sending facility identifier is extracted from a custom Keycloak
|
||||
* token claim. Keycloak is configured (see ops/keycloak-setup.md) to add
|
||||
* the facility ID as a claim named {@code sending_facility} via a user
|
||||
* attribute mapper on the {@code fhir-vendor-{org-id}} client.
|
||||
* If the claim is absent, the {@code client_id} is used as a fallback
|
||||
* facility identifier.
|
||||
*
|
||||
* <h2>Error responses</h2>
|
||||
* <ul>
|
||||
* <li>Missing token: {@code 401 Unauthorized} — "No Bearer token provided"</li>
|
||||
* <li>Invalid signature: {@code 401 Unauthorized} — "Token signature invalid"</li>
|
||||
* <li>Expired token: {@code 401 Unauthorized} — "Token has expired"</li>
|
||||
* <li>Wrong issuer: {@code 401 Unauthorized} — "Token issuer invalid"</li>
|
||||
* <li>Missing mci-api role: {@code 401 Unauthorized} — "Required role not present"</li>
|
||||
* </ul>
|
||||
* All 401 responses include a {@code WWW-Authenticate} header per RFC 6750.
|
||||
* Error details are intentionally minimal in the response body — full details
|
||||
* are written to the structured audit log to avoid leaking token content to callers.
|
||||
*/
|
||||
@Interceptor
|
||||
@Component
|
||||
public class KeycloakJwtInterceptor {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(KeycloakJwtInterceptor.class);
|
||||
|
||||
// =========================================================================
|
||||
// Request attribute keys — set on HttpServletRequest for downstream use
|
||||
// by TerminologyCacheManager, AuditEventInterceptor, and ClusterExpressionValidator
|
||||
// =========================================================================
|
||||
|
||||
/** Boolean — true if token has fhir-admin role */
|
||||
public static final String REQUEST_ATTR_IS_ADMIN = "BD_FHIR_IS_ADMIN";
|
||||
/** String — Keycloak client_id claim */
|
||||
public static final String REQUEST_ATTR_CLIENT_ID = "BD_FHIR_CLIENT_ID";
|
||||
/** String — sending_facility custom claim (falls back to client_id) */
|
||||
public static final String REQUEST_ATTR_FACILITY = "BD_FHIR_FACILITY";
|
||||
/** String — JWT sub claim (service account user ID) */
|
||||
public static final String REQUEST_ATTR_SUBJECT = "BD_FHIR_SUBJECT";
|
||||
/** String — per-request UUID for log correlation */
|
||||
public static final String REQUEST_ATTR_REQUEST_ID = "BD_FHIR_REQUEST_ID";
|
||||
/** JWTClaimsSet — full parsed claims, available for audit use */
|
||||
public static final String REQUEST_ATTR_CLAIMS = "BD_FHIR_JWT_CLAIMS";
|
||||
/** Boolean — true if token validation was bypassed (exempted path) */
|
||||
public static final String REQUEST_ATTR_AUTH_EXEMPTED = "BD_FHIR_AUTH_EXEMPTED";
|
||||
|
||||
// MDC keys — populated for structured log correlation
|
||||
private static final String MDC_REQUEST_ID = "requestId";
|
||||
private static final String MDC_CLIENT_ID = "clientId";
|
||||
private static final String MDC_FACILITY = "sendingFacility";
|
||||
private static final String MDC_REQUEST_IP = "requestIp";
|
||||
|
||||
// Keycloak claim names
|
||||
private static final String CLAIM_REALM_ACCESS = "realm_access";
|
||||
private static final String CLAIM_RESOURCE_ACCESS = "resource_access";
|
||||
private static final String CLAIM_ROLES = "roles";
|
||||
private static final String CLAIM_SENDING_FACILITY = "sending_facility";
|
||||
private static final String CLAIM_CLIENT_ID = "azp"; // Keycloak: authorised party
|
||||
|
||||
// Paths that do not require authentication
|
||||
private static final Set<String> EXEMPT_EXACT_PATHS = new HashSet<>(Arrays.asList(
|
||||
"/fhir/metadata",
|
||||
"/actuator/health",
|
||||
"/actuator/health/liveness",
|
||||
"/actuator/health/readiness",
|
||||
"/actuator/info"
|
||||
));
|
||||
|
||||
private static final Set<String> EXEMPT_PREFIX_PATHS = new HashSet<>(Arrays.asList(
|
||||
"/actuator/health/"
|
||||
));
|
||||
|
||||
@Value("${bd.fhir.keycloak.issuer}")
|
||||
private String expectedIssuer;
|
||||
|
||||
@Value("${bd.fhir.keycloak.jwks-url}")
|
||||
private String jwksUrl;
|
||||
|
||||
@Value("${bd.fhir.keycloak.required-role}")
|
||||
private String requiredRole;
|
||||
|
||||
@Value("${bd.fhir.keycloak.admin-role}")
|
||||
private String adminRole;
|
||||
|
||||
@Value("${bd.fhir.keycloak.jwks-cache-ttl-seconds}")
|
||||
private long jwksCacheTtlSeconds;
|
||||
|
||||
// Nimbus JWT processor — thread-safe, reused across all requests
|
||||
private ConfigurableJWTProcessor<SecurityContext> jwtProcessor;
|
||||
|
||||
@PostConstruct
public void initialise() throws MalformedURLException {
    // Build JWKS source with DefaultJWKSetCache.
    //
    // DefaultJWKSetCache(lifespan, refreshTime, timeUnit):
    //   lifespan:    TTL for a cached key set (configured via
    //                bd.fhir.keycloak.jwks-cache-ttl-seconds).
    //   refreshTime: elapsed time since retrieval after which a proactive
    //                refresh is suggested. Passed as 75% of the TTL below —
    //                e.g. with a 1-hour TTL the refresh happens at the 45-min
    //                mark, leaving a 25% window so there is no gap between
    //                expiry and new key availability.
    //
    // RemoteJWKSet behaviour on unknown kid: if a JWT arrives with a kid not
    // in the cached key set, RemoteJWKSet immediately re-fetches the JWKS URL
    // regardless of TTL — presumably this is what handles Keycloak key
    // rotation within one JWKS round-trip (NOTE(review): confirm against the
    // Nimbus version in use).
    DefaultJWKSetCache jwkSetCache = new DefaultJWKSetCache(
            jwksCacheTtlSeconds,
            (long) (jwksCacheTtlSeconds * 0.75), // refresh at 75% of TTL
            TimeUnit.SECONDS
    );

    JWKSource<SecurityContext> jwkSource = new RemoteJWKSet<>(
            new URL(jwksUrl),
            null, // default ResourceRetriever (uses HttpURLConnection)
            jwkSetCache
    );

    // Key selector: accept RS256 tokens (Keycloak default signing algorithm).
    // Keycloak also supports RS384, RS512, PS256 — if your realm uses
    // a different algorithm, add it here. Never accept HS256 (symmetric) —
    // it requires sharing the secret with every verifier.
    JWSKeySelector<SecurityContext> keySelector =
            new JWSVerificationKeySelector<>(JWSAlgorithm.RS256, jwkSource);

    // JWT claims verifier — validates exp, nbf automatically, plus an exact
    // issuer match and presence of the listed required claims.
    DefaultJWTClaimsVerifier<SecurityContext> claimsVerifier =
            new DefaultJWTClaimsVerifier<>(
                    // Exact match claims — issuer must exactly equal expectedIssuer
                    new JWTClaimsSet.Builder()
                            .issuer(expectedIssuer)
                            .build(),
                    // Required claim names — must be present (any value)
                    new HashSet<>(Arrays.asList("sub", "exp", "iat", CLAIM_CLIENT_ID))
            );

    // Processor is thread-safe and reused for every request.
    jwtProcessor = new DefaultJWTProcessor<>();
    jwtProcessor.setJWSKeySelector(keySelector);
    jwtProcessor.setJWTClaimsSetVerifier(claimsVerifier);

    log.info("KeycloakJwtInterceptor initialised: issuer={} jwksUrl={} " +
            "requiredRole={} adminRole={} jwksCacheTtlSeconds={}",
            expectedIssuer, jwksUrl, requiredRole, adminRole, jwksCacheTtlSeconds);
}
|
||||
|
||||
// =========================================================================
|
||||
// HAPI interceptor hook — fires before every request is processed
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* Pre-request hook — validates JWT before HAPI processes the request.
|
||||
*
|
||||
* <p>Runs at {@link Pointcut#SERVER_INCOMING_REQUEST_PRE_HANDLED} —
|
||||
* after HAPI has parsed the request URL and method but before any
|
||||
* resource reading, validation, or persistence occurs.
|
||||
*
|
||||
* <p>On authentication failure: throws {@link AuthenticationException}
|
||||
* (HTTP 401) or {@link ForbiddenOperationException} (HTTP 403).
|
||||
* HAPI catches these and returns the appropriate HTTP response with
|
||||
* a FHIR OperationOutcome.
|
||||
*
|
||||
* <p>On success: sets request attributes and MDC context, then returns
|
||||
* normally — HAPI continues processing the request.
|
||||
*
|
||||
* @return {@code true} to continue processing; exception thrown on failure
|
||||
*/
|
||||
@Hook(Pointcut.SERVER_INCOMING_REQUEST_PRE_HANDLED)
|
||||
public boolean validateRequest(
|
||||
RequestDetails requestDetails,
|
||||
HttpServletRequest servletRequest,
|
||||
HttpServletResponse servletResponse) {
|
||||
|
||||
String requestId = UUID.randomUUID().toString();
|
||||
String requestPath = servletRequest.getRequestURI();
|
||||
String method = servletRequest.getMethod();
|
||||
String clientIp = extractClientIp(servletRequest);
|
||||
|
||||
// Assign request ID immediately — available in logs even for rejected requests
|
||||
servletRequest.setAttribute(REQUEST_ATTR_REQUEST_ID, requestId);
|
||||
|
||||
// Populate MDC with available context before auth (so rejected requests are logged)
|
||||
MDC.put(MDC_REQUEST_ID, requestId);
|
||||
MDC.put(MDC_REQUEST_IP, clientIp);
|
||||
|
||||
try {
|
||||
// Check if this path is exempt from authentication
|
||||
if (isExemptPath(requestPath, method)) {
|
||||
servletRequest.setAttribute(REQUEST_ATTR_AUTH_EXEMPTED, Boolean.TRUE);
|
||||
log.debug("Auth exempted: method={} path={} requestId={}",
|
||||
method, requestPath, requestId);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Extract Bearer token
|
||||
String authHeader = servletRequest.getHeader("Authorization");
|
||||
if (authHeader == null || authHeader.isBlank()) {
|
||||
log.info("Auth rejected — no Authorization header: " +
|
||||
"method={} path={} ip={} requestId={}",
|
||||
method, requestPath, clientIp, requestId);
|
||||
throw unauthorised("No Bearer token provided",
|
||||
servletResponse, requestId);
|
||||
}
|
||||
|
||||
if (!authHeader.startsWith("Bearer ")) {
|
||||
log.info("Auth rejected — Authorization header not Bearer: " +
|
||||
"method={} path={} ip={} requestId={}",
|
||||
method, requestPath, clientIp, requestId);
|
||||
throw unauthorised("Authorization header must use Bearer scheme",
|
||||
servletResponse, requestId);
|
||||
}
|
||||
|
||||
String token = authHeader.substring(7).trim();
|
||||
if (token.isBlank()) {
|
||||
throw unauthorised("Bearer token is empty",
|
||||
servletResponse, requestId);
|
||||
}
|
||||
|
||||
// Validate JWT: signature, expiry, issuer
|
||||
JWTClaimsSet claims;
|
||||
try {
|
||||
claims = jwtProcessor.process(token, null);
|
||||
} catch (com.nimbusds.jwt.proc.BadJWTException e) {
|
||||
// BadJWTException covers: expired, wrong issuer, missing required claims
|
||||
String reason = classifyJwtException(e);
|
||||
log.info("Auth rejected — JWT claims invalid: reason={} " +
|
||||
"method={} path={} ip={} requestId={}",
|
||||
reason, method, requestPath, clientIp, requestId);
|
||||
throw unauthorised(reason, servletResponse, requestId);
|
||||
|
||||
} catch (BadJOSEException e) {
|
||||
// BadJOSEException covers: invalid signature, no matching key
|
||||
log.info("Auth rejected — JWT signature invalid: method={} " +
|
||||
"path={} ip={} requestId={} detail={}",
|
||||
method, requestPath, clientIp, requestId, e.getMessage());
|
||||
throw unauthorised("Token signature invalid",
|
||||
servletResponse, requestId);
|
||||
|
||||
} catch (JOSEException e) {
|
||||
// JOSEException: key processing error, algorithm mismatch
|
||||
log.warn("Auth error — JOSE processing failed: method={} path={} " +
|
||||
"ip={} requestId={} error={}",
|
||||
method, requestPath, clientIp, requestId, e.getMessage());
|
||||
throw unauthorised("Token processing error",
|
||||
servletResponse, requestId);
|
||||
|
||||
} catch (ParseException e) {
|
||||
// Malformed JWT structure — not a valid JWT at all
|
||||
log.info("Auth rejected — malformed JWT: method={} path={} " +
|
||||
"ip={} requestId={}",
|
||||
method, requestPath, clientIp, requestId);
|
||||
throw unauthorised("Token is malformed",
|
||||
servletResponse, requestId);
|
||||
}
|
||||
|
||||
// Extract identity claims
|
||||
String clientId = extractClientId(claims);
|
||||
String subject = extractSubject(claims);
|
||||
String facility = extractFacility(claims, clientId);
|
||||
|
||||
// Role validation — check mci-api in both realm_access and resource_access
|
||||
boolean hasMciApiRole = hasRole(claims, requiredRole);
|
||||
if (!hasMciApiRole) {
|
||||
log.info("Auth rejected — missing required role '{}': " +
|
||||
"clientId={} subject={} method={} path={} ip={} requestId={}",
|
||||
requiredRole, clientId, subject, method,
|
||||
requestPath, clientIp, requestId);
|
||||
throw unauthorised("Required role '" + requiredRole + "' not present in token",
|
||||
servletResponse, requestId);
|
||||
}
|
||||
|
||||
// Check for admin role (does NOT fail if absent — stored as attribute)
|
||||
boolean hasAdminRole = hasRole(claims, adminRole);
|
||||
|
||||
// Set request attributes for downstream components
|
||||
servletRequest.setAttribute(REQUEST_ATTR_CLIENT_ID, clientId);
|
||||
servletRequest.setAttribute(REQUEST_ATTR_FACILITY, facility);
|
||||
servletRequest.setAttribute(REQUEST_ATTR_SUBJECT, subject);
|
||||
servletRequest.setAttribute(REQUEST_ATTR_IS_ADMIN, hasAdminRole);
|
||||
servletRequest.setAttribute(REQUEST_ATTR_CLAIMS, claims);
|
||||
servletRequest.setAttribute(REQUEST_ATTR_AUTH_EXEMPTED, Boolean.FALSE);
|
||||
|
||||
// Populate MDC with full identity context for structured logging.
|
||||
// All log statements after this point (in this thread) will include
|
||||
// clientId, sendingFacility, and requestId automatically.
|
||||
MDC.put(MDC_CLIENT_ID, clientId);
|
||||
MDC.put(MDC_FACILITY, facility);
|
||||
|
||||
log.debug("Auth accepted: clientId={} facility={} subject={} " +
|
||||
"isAdmin={} method={} path={} requestId={}",
|
||||
clientId, facility, subject, hasAdminRole,
|
||||
method, requestPath, requestId);
|
||||
|
||||
return true; // Proceed with request processing
|
||||
|
||||
} catch (AuthenticationException | ForbiddenOperationException e) {
|
||||
// Re-throw HAPI exceptions — do not wrap them
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
// Unexpected error in auth interceptor — fail closed
|
||||
log.error("Unexpected error in JWT interceptor: method={} path={} " +
|
||||
"ip={} requestId={} error={}",
|
||||
method, requestPath, clientIp, requestId, e.getMessage(), e);
|
||||
throw unauthorised("Authentication processing error",
|
||||
servletResponse, requestId);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Post-request hook — cleans up MDC context after request completes.
|
||||
*
|
||||
* <p>MDC is thread-local. In a thread-pool model, threads are reused.
|
||||
* If MDC is not cleared after each request, the next request on the
|
||||
* same thread inherits the previous request's MDC context — causing
|
||||
* log entries for request N to appear attributed to client/facility of
|
||||
* request N-1.
|
||||
*
|
||||
* <p>This is the most common MDC bug in HAPI overlays and the most
|
||||
* difficult to reproduce in testing (requires concurrent load).
|
||||
*/
|
||||
@Hook(Pointcut.SERVER_PROCESSING_COMPLETED_NORMALLY)
|
||||
public void clearMdcOnSuccess(RequestDetails requestDetails) {
|
||||
MDC.remove(MDC_REQUEST_ID);
|
||||
MDC.remove(MDC_CLIENT_ID);
|
||||
MDC.remove(MDC_FACILITY);
|
||||
MDC.remove(MDC_REQUEST_IP);
|
||||
}
|
||||
|
||||
/**
|
||||
* Post-request hook for failed requests — cleans up MDC even on exception.
|
||||
*
|
||||
* <p>Both hooks are required. {@code SERVER_PROCESSING_COMPLETED_NORMALLY}
|
||||
* does not fire on exceptions. Without this hook, MDC leaks on any request
|
||||
* that results in an exception (including 422 validation failures).
|
||||
*/
|
||||
@Hook(Pointcut.SERVER_PROCESSING_COMPLETED)
|
||||
public void clearMdcAlways(RequestDetails requestDetails) {
|
||||
MDC.remove(MDC_REQUEST_ID);
|
||||
MDC.remove(MDC_CLIENT_ID);
|
||||
MDC.remove(MDC_FACILITY);
|
||||
MDC.remove(MDC_REQUEST_IP);
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Role extraction — Keycloak JWT structure
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* Checks whether a JWT contains the specified role.
|
||||
*
|
||||
* <p>Keycloak places roles in two locations:
|
||||
* <ul>
|
||||
* <li>{@code realm_access.roles[]} — realm-level roles</li>
|
||||
* <li>{@code resource_access.{client-id}.roles[]} — client-level roles</li>
|
||||
* </ul>
|
||||
* The role is present if it appears in either location.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
private boolean hasRole(JWTClaimsSet claims, String role) {
|
||||
try {
|
||||
// Check realm_access.roles
|
||||
Map<String, Object> realmAccess =
|
||||
(Map<String, Object>) claims.getClaim(CLAIM_REALM_ACCESS);
|
||||
if (realmAccess != null) {
|
||||
List<String> realmRoles = (List<String>) realmAccess.get(CLAIM_ROLES);
|
||||
if (realmRoles != null && realmRoles.contains(role)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// Check resource_access.*.roles
|
||||
Map<String, Object> resourceAccess =
|
||||
(Map<String, Object>) claims.getClaim(CLAIM_RESOURCE_ACCESS);
|
||||
if (resourceAccess != null) {
|
||||
for (Map.Entry<String, Object> entry : resourceAccess.entrySet()) {
|
||||
Map<String, Object> clientAccess =
|
||||
(Map<String, Object>) entry.getValue();
|
||||
if (clientAccess != null) {
|
||||
List<String> clientRoles =
|
||||
(List<String>) clientAccess.get(CLAIM_ROLES);
|
||||
if (clientRoles != null && clientRoles.contains(role)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
|
||||
} catch (ClassCastException e) {
|
||||
// Malformed role claims — treat as role absent
|
||||
log.warn("Malformed role claims in JWT: {}", e.getMessage());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Claim extraction helpers
|
||||
// =========================================================================
|
||||
|
||||
private String extractClientId(JWTClaimsSet claims) {
|
||||
try {
|
||||
String azp = claims.getStringClaim(CLAIM_CLIENT_ID);
|
||||
return azp != null ? azp : "unknown";
|
||||
} catch (ParseException e) {
|
||||
return "unknown";
|
||||
}
|
||||
}
|
||||
|
||||
private String extractSubject(JWTClaimsSet claims) {
|
||||
String sub = claims.getSubject();
|
||||
return sub != null ? sub : "unknown";
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts the sending facility identifier from the JWT.
|
||||
*
|
||||
* <p>The {@code sending_facility} claim is a custom Keycloak mapper
|
||||
* configured on each vendor client (see ops/keycloak-setup.md).
|
||||
* It contains the DGHS facility code of the submitting organisation.
|
||||
*
|
||||
* <p>If absent (e.g., during initial rollout before all clients are
|
||||
* configured), falls back to {@code client_id}. This fallback is logged
|
||||
* at WARN level so the ops team can identify unconfigured clients.
|
||||
*/
|
||||
private String extractFacility(JWTClaimsSet claims, String clientId) {
|
||||
try {
|
||||
String facility = claims.getStringClaim(CLAIM_SENDING_FACILITY);
|
||||
if (facility != null && !facility.isBlank()) {
|
||||
return facility;
|
||||
}
|
||||
// Fallback — log so unconfigured clients are visible
|
||||
log.warn("sending_facility claim absent in token for clientId={}. " +
|
||||
"Using client_id as facility identifier. " +
|
||||
"Configure a sending_facility user attribute mapper on this " +
|
||||
"Keycloak client — see ops/keycloak-setup.md.",
|
||||
clientId);
|
||||
return clientId;
|
||||
} catch (ParseException e) {
|
||||
return clientId;
|
||||
}
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Path exemption
|
||||
// =========================================================================
|
||||
|
||||
private boolean isExemptPath(String requestPath, String method) {
|
||||
// Exact path match
|
||||
if (EXEMPT_EXACT_PATHS.contains(requestPath)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Prefix match for /actuator/health/* sub-paths
|
||||
for (String prefix : EXEMPT_PREFIX_PATHS) {
|
||||
if (requestPath.startsWith(prefix)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// Prometheus metrics endpoint — only exempt if accessed from internal network.
|
||||
// For simplicity at pilot phase, /actuator/prometheus is exempted entirely.
|
||||
// At national rollout, restrict to internal monitoring network via nginx.
|
||||
if (requestPath.equals("/actuator/prometheus") ||
|
||||
requestPath.equals("/actuator/metrics")) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Error handling
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* Builds and throws an {@link AuthenticationException} (HTTP 401).
|
||||
*
|
||||
* <p>Sets {@code WWW-Authenticate} header per RFC 6750 §3.1.
|
||||
* The realm value identifies the BD FHIR server. Error description
|
||||
* is intentionally generic in the HTTP response — full details are
|
||||
* in the audit log only.
|
||||
*
|
||||
* <p>The requestId is included in the response so vendors can correlate
|
||||
* a rejected submission with the audit log entry. This is the only
|
||||
* DGHS-internal identifier that vendors receive.
|
||||
*/
|
||||
private AuthenticationException unauthorised(
|
||||
String internalReason,
|
||||
HttpServletResponse response,
|
||||
String requestId) {
|
||||
|
||||
// WWW-Authenticate header per RFC 6750
|
||||
response.setHeader("WWW-Authenticate",
|
||||
"Bearer realm=\"BD FHIR National Repository\", " +
|
||||
"error=\"invalid_token\", " +
|
||||
"error_description=\"Token validation failed\"");
|
||||
|
||||
// Return minimal information in the exception message.
|
||||
// The HAPI framework converts this to an OperationOutcome.
|
||||
// Do NOT include the internalReason in the response — it may
|
||||
// reveal token structure information to an attacker.
|
||||
return new AuthenticationException(
|
||||
"Authentication failed. RequestId: " + requestId +
|
||||
" — present this ID to DGHS support for investigation.");
|
||||
}
|
||||
|
||||
/**
|
||||
* Classifies a Nimbus {@link com.nimbusds.jwt.proc.BadJWTException}
|
||||
* into a human-readable reason for internal logging.
|
||||
*
|
||||
* <p>These reasons go to the audit log only — never to the HTTP response.
|
||||
*/
|
||||
private String classifyJwtException(com.nimbusds.jwt.proc.BadJWTException e) {
|
||||
String msg = e.getMessage();
|
||||
if (msg == null) return "JWT validation failed";
|
||||
String lower = msg.toLowerCase();
|
||||
if (lower.contains("expired")) return "Token has expired";
|
||||
if (lower.contains("issuer")) return "Token issuer mismatch: expected " + expectedIssuer;
|
||||
if (lower.contains("not before")) return "Token not yet valid (nbf claim)";
|
||||
if (lower.contains("missing") || lower.contains("required")) {
|
||||
return "Token missing required claim";
|
||||
}
|
||||
// Do not include the raw message — it may contain token fragments
|
||||
return "Token claims validation failed";
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Client IP extraction
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* Extracts the real client IP from the request.
|
||||
*
|
||||
* <p>nginx is configured to set {@code X-Forwarded-For} with the real
|
||||
* client IP. Trust only the first IP in the header — it is set by nginx
|
||||
* and cannot be spoofed by the client (nginx overwrites, not appends).
|
||||
*
|
||||
* <p>If {@code X-Forwarded-For} is absent (direct connection, possible
|
||||
* in development), falls back to {@code RemoteAddr}.
|
||||
*/
|
||||
private String extractClientIp(HttpServletRequest request) {
|
||||
String xff = request.getHeader("X-Forwarded-For");
|
||||
if (xff != null && !xff.isBlank()) {
|
||||
// nginx sets exactly one IP — no need to split on comma
|
||||
// If comma-separated, take the first (leftmost = original client)
|
||||
int commaIdx = xff.indexOf(',');
|
||||
return commaIdx > 0 ? xff.substring(0, commaIdx).trim() : xff.trim();
|
||||
}
|
||||
return request.getRemoteAddr();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,609 @@
|
||||
package bd.gov.dghs.fhir.terminology;
|
||||
|
||||
import ca.uhn.fhir.context.FhirContext;
|
||||
import ca.uhn.fhir.context.support.ConceptValidationOptions;
|
||||
import ca.uhn.fhir.context.support.IValidationSupport;
|
||||
import ca.uhn.fhir.context.support.ValidationSupportContext;
|
||||
import ca.uhn.fhir.context.support.ValueSetExpansionOptions;
|
||||
import com.fasterxml.jackson.databind.JsonNode;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import org.apache.hc.client5.http.classic.methods.HttpGet;
|
||||
import org.apache.hc.client5.http.config.RequestConfig;
|
||||
import org.apache.hc.client5.http.impl.classic.CloseableHttpClient;
|
||||
import org.apache.hc.client5.http.impl.classic.HttpClients;
|
||||
import org.apache.hc.core5.http.ClassicHttpResponse;
|
||||
import org.apache.hc.core5.util.Timeout;
|
||||
import org.hl7.fhir.instance.model.api.IBaseResource;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import jakarta.annotation.PostConstruct;
|
||||
import jakarta.annotation.PreDestroy;
|
||||
import java.io.IOException;
|
||||
import java.net.URLEncoder;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.time.Instant;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* Custom HAPI validation support that integrates OCL as the national
|
||||
* terminology authority for ICD-11 code validation.
|
||||
*
|
||||
* <h2>Design</h2>
|
||||
* <p>This class extends HAPI's {@link IValidationSupport} interface and sits
|
||||
* last in the {@link ca.uhn.fhir.context.support.ValidationSupportChain}.
|
||||
* It intercepts terminology validation calls for the ICD-11 MMS system
|
||||
* ({@code http://id.who.int/icd/release/11/mms}) and routes them to OCL
|
||||
* {@code $validate-code}.
|
||||
*
|
||||
* <h2>$expand suppression</h2>
|
||||
* <p>OCL returns an empty response for {@code $expand}. HAPI's default
|
||||
* {@link ca.uhn.fhir.jpa.term.api.ITermReadSvc} treats an empty expansion
|
||||
* as a validation failure, causing all ICD-11 coded resources to be rejected
|
||||
* regardless of code validity. This class overrides
|
||||
* {@link #expandValueSet(ValidationSupportContext, ValueSetExpansionOptions, IBaseResource)}
|
||||
* to return {@code null} (not-supported) for ICD-11 ValueSets, which instructs
|
||||
* the chain to skip expansion and proceed directly to {@code $validate-code}.
|
||||
*
|
||||
* <h2>Caching</h2>
|
||||
* <p>Validated codes are cached in a {@link ConcurrentHashMap} with a
|
||||
* configurable TTL (default 24 hours). Cache entries store the validation result
|
||||
* (valid/invalid) and the timestamp of validation. On cache hit, OCL is never called.
|
||||
*
|
||||
* <p>Cache key format: {@code system|version|code}
|
||||
* where version is the OCL collection version string or empty string if absent.
|
||||
*
|
||||
* <h2>Cache invalidation</h2>
|
||||
* <p>The cache is flushed by {@link TerminologyCacheManager} when the ICD-11
|
||||
* version upgrade pipeline calls the admin flush endpoint. After flush, the next
|
||||
* validation call for each code hits OCL again and repopulates the cache.
|
||||
*
|
||||
* <h2>Error handling</h2>
|
||||
* <p>OCL connectivity failures are handled as follows:
|
||||
* <ul>
|
||||
* <li>Timeout (>10s): return {@code null} (not-supported) — HAPI falls through
|
||||
* to next support in chain. If no other support handles it, the code is
|
||||
* treated as valid (fail-open for OCL outages).</li>
|
||||
* <li>HTTP 4xx: return invalid result — code was rejected by OCL.</li>
|
||||
* <li>HTTP 5xx: return {@code null} (not-supported) — OCL server error,
|
||||
* fail-open to prevent full service outage during OCL maintenance.</li>
|
||||
* <li>{@code $expand} failure: log and return {@code null} — never reject.</li>
|
||||
* </ul>
|
||||
*
|
||||
* <p>Fail-open for OCL outages is a deliberate policy decision. The alternative
|
||||
* (fail-closed) would reject all coded resource submissions during OCL downtime,
|
||||
* which is operationally worse than allowing a small window of unvalidated codes.
|
||||
* OCL outages must be tracked in the audit log (see AuditEventInterceptor).
|
||||
*/
|
||||
@Component
|
||||
public class BdTerminologyValidationSupport implements IValidationSupport {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(BdTerminologyValidationSupport.class);
|
||||
|
||||
/** ICD-11 MMS system URI — must match BD Core IG profile declarations */
|
||||
public static final String ICD11_SYSTEM = "http://id.who.int/icd/release/11/mms";
|
||||
|
||||
/** Cache: key = "system|version|code", value = CacheEntry */
|
||||
private final Map<String, CacheEntry> validationCache = new ConcurrentHashMap<>();
|
||||
|
||||
/** Background thread for cache TTL eviction */
|
||||
private final ScheduledExecutorService cacheEvictionExecutor =
|
||||
Executors.newSingleThreadScheduledExecutor(r -> {
|
||||
Thread t = new Thread(r, "terminology-cache-eviction");
|
||||
t.setDaemon(true);
|
||||
return t;
|
||||
});
|
||||
|
||||
private final FhirContext fhirContext;
|
||||
private final ObjectMapper objectMapper;
|
||||
private CloseableHttpClient httpClient;
|
||||
|
||||
@Value("${bd.fhir.ocl.base-url}")
|
||||
private String oclBaseUrl;
|
||||
|
||||
@Value("${bd.fhir.ocl.timeout-seconds}")
|
||||
private int timeoutSeconds;
|
||||
|
||||
@Value("${bd.fhir.ocl.retry-attempts}")
|
||||
private int retryAttempts;
|
||||
|
||||
@Value("${bd.fhir.terminology.cache-ttl-seconds}")
|
||||
private long cacheTtlSeconds;
|
||||
|
||||
/**
 * Spring-injected constructor. A dedicated {@link ObjectMapper} is created
 * here for parsing OCL {@code $validate-code} responses.
 */
public BdTerminologyValidationSupport(FhirContext fhirContext) {
    this.objectMapper = new ObjectMapper();
    this.fhirContext = fhirContext;
}
|
||||
|
||||
@PostConstruct
|
||||
public void initialise() {
|
||||
// Build a dedicated HttpClient for OCL calls.
|
||||
// Separate from HAPI's internal HttpClient to avoid
|
||||
// interference with HAPI's own REST operations.
|
||||
RequestConfig requestConfig = RequestConfig.custom()
|
||||
.setConnectionRequestTimeout(Timeout.ofSeconds(timeoutSeconds))
|
||||
.setResponseTimeout(Timeout.ofSeconds(timeoutSeconds))
|
||||
.build();
|
||||
|
||||
this.httpClient = HttpClients.custom()
|
||||
.setDefaultRequestConfig(requestConfig)
|
||||
.setMaxConnTotal(20) // max total OCL connections
|
||||
.setMaxConnPerRoute(20) // all connections go to one host
|
||||
.evictExpiredConnections()
|
||||
.evictIdleConnections(Timeout.ofMinutes(5))
|
||||
.build();
|
||||
|
||||
// Schedule cache eviction every hour.
|
||||
// Eviction removes entries older than cacheTtlSeconds.
|
||||
// This prevents unbounded cache growth over long uptime periods.
|
||||
cacheEvictionExecutor.scheduleAtFixedRate(
|
||||
this::evictExpiredCacheEntries,
|
||||
1, 1, TimeUnit.HOURS);
|
||||
|
||||
log.info("BdTerminologyValidationSupport initialised: oclBaseUrl={}, " +
|
||||
"timeoutSeconds={}, cacheTtlSeconds={}", oclBaseUrl, timeoutSeconds, cacheTtlSeconds);
|
||||
}
|
||||
|
||||
@PreDestroy
|
||||
public void shutdown() {
|
||||
cacheEvictionExecutor.shutdownNow();
|
||||
try {
|
||||
if (httpClient != null) {
|
||||
httpClient.close();
|
||||
}
|
||||
} catch (IOException e) {
|
||||
log.warn("Error closing OCL HttpClient: {}", e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// IValidationSupport interface
|
||||
// =========================================================================
|
||||
|
||||
@Override
|
||||
public FhirContext getFhirContext() {
|
||||
return fhirContext;
|
||||
}
|
||||
|
||||
/**
|
||||
* Called by HAPI validation chain to validate a code against a ValueSet.
|
||||
*
|
||||
* <p>For ICD-11 codes: checks cache first, then calls OCL {@code $validate-code}.
|
||||
* For non-ICD-11 codes: returns {@code null} (defer to other supports in chain).
|
||||
*/
|
||||
@Override
|
||||
public CodeValidationResult validateCode(
|
||||
ValidationSupportContext theValidationSupportContext,
|
||||
ConceptValidationOptions theOptions,
|
||||
String theCodeSystem,
|
||||
String theCode,
|
||||
String theDisplay,
|
||||
String theValueSetUrl) {
|
||||
|
||||
if (!isIcd11System(theCodeSystem)) {
|
||||
// Not our responsibility — defer to next support in chain
|
||||
return null;
|
||||
}
|
||||
|
||||
if (theCode == null || theCode.isBlank()) {
|
||||
return invalid("Code is null or empty", theCodeSystem, theCode);
|
||||
}
|
||||
|
||||
String cacheKey = buildCacheKey(theCodeSystem, null, theCode);
|
||||
|
||||
// Cache hit
|
||||
CacheEntry cached = validationCache.get(cacheKey);
|
||||
if (cached != null && !cached.isExpired(cacheTtlSeconds)) {
|
||||
log.debug("Terminology cache hit: system={} code={} valid={}",
|
||||
theCodeSystem, theCode, cached.valid);
|
||||
return cached.valid
|
||||
? valid(theCode, cached.display)
|
||||
: invalid(cached.invalidReason, theCodeSystem, theCode);
|
||||
}
|
||||
|
||||
// Cache miss — call OCL
|
||||
return validateWithOcl(theCodeSystem, theCode, theDisplay, theValueSetUrl, cacheKey);
|
||||
}
|
||||
|
||||
/**
|
||||
* Called by HAPI validation chain to validate a code against a CodeSystem.
|
||||
* Delegates to {@link #validateCode} for ICD-11 codes.
|
||||
*/
|
||||
@Override
|
||||
public CodeValidationResult validateCodeInValueSet(
|
||||
ValidationSupportContext theValidationSupportContext,
|
||||
ConceptValidationOptions theOptions,
|
||||
String theCodeSystem,
|
||||
String theCode,
|
||||
String theDisplay,
|
||||
IBaseResource theValueSet) {
|
||||
|
||||
if (!isIcd11System(theCodeSystem)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return validateCode(theValidationSupportContext, theOptions,
|
||||
theCodeSystem, theCode, theDisplay, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Suppresses $expand for ICD-11 ValueSets.
|
||||
*
|
||||
* <p>OCL returns an empty response for {@code $expand}. If this method
|
||||
* returned a failed expansion result, HAPI would treat it as a validation
|
||||
* failure and reject all ICD-11 coded resources. Instead, we return
|
||||
* {@code null} (not supported), which instructs the chain to skip
|
||||
* expansion for this ValueSet.
|
||||
*
|
||||
* <p>This override is the key to OCL integration correctness.
|
||||
* Without it, HAPI calls {@code $expand}, gets an empty response,
|
||||
* and rejects the resource regardless of whether the code is valid.
|
||||
*/
|
||||
@Override
|
||||
public ValueSetExpansionOutcome expandValueSet(
|
||||
ValidationSupportContext theValidationSupportContext,
|
||||
ValueSetExpansionOptions theExpansionOptions,
|
||||
IBaseResource theValueSetToExpand) {
|
||||
|
||||
// Check if this is an ICD-11 ValueSet before suppressing
|
||||
if (theValueSetToExpand != null) {
|
||||
String valueSetUrl = extractValueSetUrl(theValueSetToExpand);
|
||||
if (valueSetUrl != null && valueSetUrl.contains("icd11")) {
|
||||
log.debug("Suppressing $expand for ICD-11 ValueSet: {} " +
|
||||
"(OCL does not support $expand — using $validate-code instead)",
|
||||
valueSetUrl);
|
||||
// Return null = not supported by this support.
|
||||
// The chain will try the next support, which will also
|
||||
// return null, and HAPI will fall through to $validate-code.
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// Non-ICD-11 ValueSets: not our responsibility
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicates whether this support can handle ValueSet expansion for ICD-11.
|
||||
*
|
||||
* <p>Returning {@code false} for ICD-11 ValueSets prevents HAPI from
|
||||
* even attempting {@code $expand} via this support — it goes directly
|
||||
* to {@code $validate-code}. This is the {@code isValueSetSupported()}
|
||||
* override hook added in HAPI 7.2.0 specifically for this use case.
|
||||
*/
|
||||
@Override
|
||||
public boolean isValueSetSupported(
|
||||
ValidationSupportContext theValidationSupportContext,
|
||||
String theValueSetUrl) {
|
||||
|
||||
if (theValueSetUrl != null && theValueSetUrl.contains("icd11")) {
|
||||
log.debug("isValueSetSupported=false for ICD-11 ValueSet: {} " +
|
||||
"(routing to $validate-code)", theValueSetUrl);
|
||||
return false;
|
||||
}
|
||||
return false; // Let other supports in chain answer for non-ICD-11 ValueSets
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicates whether this support can handle code system lookups for ICD-11.
|
||||
*/
|
||||
@Override
|
||||
public boolean isCodeSystemSupported(
|
||||
ValidationSupportContext theValidationSupportContext,
|
||||
String theSystem) {
|
||||
return isIcd11System(theSystem);
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// OCL $validate-code call
|
||||
// =========================================================================
|
||||
|
||||
private CodeValidationResult validateWithOcl(
|
||||
String codeSystem,
|
||||
String code,
|
||||
String display,
|
||||
String valueSetUrl,
|
||||
String cacheKey) {
|
||||
|
||||
// Build OCL $validate-code URL.
|
||||
// Use valueSetUrl if provided (checks class restriction via bd-condition-icd11 VS).
|
||||
// Fall back to code system validation if no ValueSet provided.
|
||||
String url = buildValidateCodeUrl(codeSystem, code, display, valueSetUrl);
|
||||
|
||||
log.debug("OCL $validate-code: url={}", url);
|
||||
|
||||
for (int attempt = 1; attempt <= retryAttempts; attempt++) {
|
||||
try {
|
||||
CodeValidationResult result = executeOclCall(url, codeSystem, code, cacheKey);
|
||||
if (result != null) {
|
||||
return result;
|
||||
}
|
||||
} catch (OclTimeoutException e) {
|
||||
log.warn("OCL $validate-code timeout (attempt {}/{}): system={} code={} url={}",
|
||||
attempt, retryAttempts, codeSystem, code, url);
|
||||
if (attempt == retryAttempts) {
|
||||
// After all retries exhausted: fail-open.
|
||||
// Log as warn — the AuditEventInterceptor will record
|
||||
// the OCL unavailability in the audit trail.
|
||||
log.warn("OCL unavailable after {} attempts — accepting code " +
|
||||
"without terminology validation (fail-open): system={} code={}",
|
||||
retryAttempts, codeSystem, code);
|
||||
return null; // null = not supported = defer = fail-open
|
||||
}
|
||||
// Brief wait before retry
|
||||
try { Thread.sleep(500L * attempt); } catch (InterruptedException ie) {
|
||||
Thread.currentThread().interrupt();
|
||||
return null;
|
||||
}
|
||||
} catch (OclServerErrorException e) {
|
||||
log.warn("OCL server error (attempt {}/{}): system={} code={} status={}",
|
||||
attempt, retryAttempts, codeSystem, code, e.statusCode);
|
||||
if (attempt == retryAttempts) {
|
||||
log.warn("OCL server error after {} attempts — fail-open: system={} code={}",
|
||||
retryAttempts, codeSystem, code);
|
||||
return null; // fail-open on server errors
|
||||
}
|
||||
try { Thread.sleep(500L * attempt); } catch (InterruptedException ie) {
|
||||
Thread.currentThread().interrupt();
|
||||
return null;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error("Unexpected error calling OCL: system={} code={} error={}",
|
||||
codeSystem, code, e.getMessage(), e);
|
||||
return null; // fail-open on unexpected errors
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
 * Performs one HTTP GET against the OCL {@code $validate-code} URL and maps
 * the response status onto a validation outcome.
 *
 * <p>Status handling (inside the response-handler lambda, which runs while
 * the pooled connection is still held):
 * <ul>
 *   <li>200 — body read fully and parsed as a FHIR Parameters resource</li>
 *   <li>4xx — definitive rejection; cached so the same invalid code never
 *       triggers another OCL round-trip, then returned as invalid</li>
 *   <li>5xx — {@code OclServerErrorException} thrown for the retry loop in
 *       {@code validateWithOcl}</li>
 *   <li>anything else (1xx/3xx) — {@code null} ("defer"), logged at warn</li>
 * </ul>
 *
 * <p>NOTE(review): {@code OclTimeoutException} is declared but never thrown
 * in this body — presumably it originates elsewhere, or a socket timeout
 * surfaces here as an {@code IOException}; confirm against the exception
 * class, which is defined outside this excerpt.
 *
 * @param url        fully built $validate-code URL
 * @param codeSystem system URI, for logging and error messages
 * @param code       code under validation, for logging and error messages
 * @param cacheKey   key under which a definitive verdict is cached
 * @return validation result, or {@code null} for an unexpected status
 */
private CodeValidationResult executeOclCall(
        String url, String codeSystem, String code, String cacheKey)
        throws OclTimeoutException, OclServerErrorException, IOException {

    HttpGet request = new HttpGet(url);
    // OCL speaks FHIR JSON — request it explicitly.
    request.setHeader("Accept", "application/fhir+json");

    return httpClient.execute(request, (ClassicHttpResponse response) -> {
        int statusCode = response.getCode();

        if (statusCode == 200) {
            // Parse Parameters response from OCL $validate-code
            byte[] body = response.getEntity().getContent().readAllBytes();
            return parseValidateCodeResponse(body, codeSystem, code, cacheKey);

        } else if (statusCode >= 400 && statusCode < 500) {
            // 4xx: code rejected by OCL (not found, wrong system, wrong class)
            String reason = "OCL rejected code: HTTP " + statusCode +
                    " for system=" + codeSystem + " code=" + code;
            log.info("OCL $validate-code rejected: system={} code={} status={}",
                    codeSystem, code, statusCode);
            // Cache the rejection — do not re-call OCL for the same invalid code
            validationCache.put(cacheKey, CacheEntry.invalid(reason));
            return invalid(reason, codeSystem, code);

        } else if (statusCode >= 500) {
            // Server-side failure — let the caller's retry/fail-open logic decide.
            throw new OclServerErrorException(statusCode);

        } else {
            // 1xx/3xx — unexpected from OCL; defer (fail-open upstream).
            log.warn("Unexpected OCL status: {}", statusCode);
            return null;
        }
    });
}
|
||||
|
||||
/**
 * Parses an OCL {@code $validate-code} Parameters response into a
 * validation result, caching the outcome as a side effect.
 *
 * <p>Reads up to three parameters from the FHIR Parameters resource:
 * {@code result} (boolean, defaults to {@code false} when absent),
 * {@code display} (optional preferred term) and {@code message}
 * (optional rejection reason). The valid/invalid outcome is written to
 * {@code validationCache} under {@code cacheKey} before returning.
 *
 * @param body       raw JSON bytes of the OCL response body
 * @param codeSystem code system URI (used in logging / error text)
 * @param code       the validated code
 * @param cacheKey   key under which the outcome is cached
 * @return valid or invalid result, or {@code null} when the body cannot
 *         be parsed (fail-open — the resource is not rejected)
 */
private CodeValidationResult parseValidateCodeResponse(
        byte[] body, String codeSystem, String code, String cacheKey) {

    try {
        JsonNode root = objectMapper.readTree(body);

        // OCL $validate-code response is a FHIR Parameters resource.
        // Key parameter: "result" (boolean) — true if valid, false if invalid.
        // Optional parameter: "display" — preferred display term.
        // Optional parameter: "message" — reason for invalidity.
        JsonNode parameter = root.path("parameter");
        boolean result = false;
        String display = null;
        String message = null;

        if (parameter.isArray()) {
            for (JsonNode param : parameter) {
                String name = param.path("name").asText();
                switch (name) {
                    case "result" -> result = param.path("valueBoolean").asBoolean(false);
                    case "display" -> display = param.path("valueString").asText(null);
                    case "message" -> message = param.path("valueString").asText(null);
                }
            }
        }

        if (result) {
            log.debug("OCL validated code: system={} code={} display={}",
                    codeSystem, code, display);
            validationCache.put(cacheKey, CacheEntry.valid(display));
            return valid(code, display);
        } else {
            // No explicit result=true — treat as rejection, preferring the
            // server-supplied message over the generic reason text.
            String reason = message != null ? message :
                    "Code not valid in system: system=" + codeSystem + " code=" + code;
            log.info("OCL rejected code: system={} code={} reason={}", codeSystem, code, reason);
            validationCache.put(cacheKey, CacheEntry.invalid(reason));
            return invalid(reason, codeSystem, code);
        }

    } catch (Exception e) {
        log.error("Failed to parse OCL $validate-code response: system={} code={} error={}",
                codeSystem, code, e.getMessage());
        // Parse failure: fail-open — do not reject the resource
        return null;
    }
}
|
||||
|
||||
// =========================================================================
|
||||
// Cache management (called by TerminologyCacheManager)
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* Flushes the entire terminology validation cache.
|
||||
*
|
||||
* <p>Called by {@link TerminologyCacheManager} when the ICD-11 version
|
||||
* upgrade pipeline completes. After flush, the next validation call for
|
||||
* each code hits OCL and repopulates the cache with the new version's results.
|
||||
*
|
||||
* <p>Thread-safe: {@link ConcurrentHashMap#clear()} is atomic.
|
||||
*
|
||||
* @return number of entries that were evicted
|
||||
*/
|
||||
public int flushCache() {
|
||||
int size = validationCache.size();
|
||||
validationCache.clear();
|
||||
log.info("Terminology cache flushed: {} entries evicted", size);
|
||||
return size;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns current cache statistics for the admin endpoint.
|
||||
*/
|
||||
public CacheStats getCacheStats() {
|
||||
long now = Instant.now().getEpochSecond();
|
||||
long expired = validationCache.values().stream()
|
||||
.filter(e -> e.isExpired(cacheTtlSeconds))
|
||||
.count();
|
||||
return new CacheStats(validationCache.size(), expired, cacheTtlSeconds);
|
||||
}
|
||||
|
||||
private void evictExpiredCacheEntries() {
|
||||
int before = validationCache.size();
|
||||
validationCache.entrySet().removeIf(e -> e.getValue().isExpired(cacheTtlSeconds));
|
||||
int evicted = before - validationCache.size();
|
||||
if (evicted > 0) {
|
||||
log.debug("Terminology cache eviction: {} expired entries removed", evicted);
|
||||
}
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// URL builders
|
||||
// =========================================================================
|
||||
|
||||
private String buildValidateCodeUrl(
|
||||
String codeSystem, String code, String display, String valueSetUrl) {
|
||||
|
||||
StringBuilder sb = new StringBuilder(oclBaseUrl);
|
||||
|
||||
if (valueSetUrl != null && !valueSetUrl.isBlank()) {
|
||||
// ValueSet-scoped validation — enforces class restriction
|
||||
// (Diagnosis + Finding only for bd-condition-icd11-diagnosis-valueset)
|
||||
sb.append("/ValueSet/$validate-code");
|
||||
sb.append("?url=").append(encode(valueSetUrl));
|
||||
sb.append("&system=").append(encode(codeSystem));
|
||||
sb.append("&code=").append(encode(code));
|
||||
} else {
|
||||
// CodeSystem-scoped validation — validates existence only
|
||||
sb.append("/CodeSystem/$validate-code");
|
||||
sb.append("?system=").append(encode(codeSystem));
|
||||
sb.append("&code=").append(encode(code));
|
||||
}
|
||||
|
||||
if (display != null && !display.isBlank()) {
|
||||
sb.append("&display=").append(encode(display));
|
||||
}
|
||||
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Helpers
|
||||
// =========================================================================
|
||||
|
||||
private boolean isIcd11System(String system) {
|
||||
return ICD11_SYSTEM.equals(system);
|
||||
}
|
||||
|
||||
private String buildCacheKey(String system, String version, String code) {
|
||||
return system + "|" + (version != null ? version : "") + "|" + code;
|
||||
}
|
||||
|
||||
private String encode(String value) {
|
||||
return URLEncoder.encode(value, StandardCharsets.UTF_8);
|
||||
}
|
||||
|
||||
private String extractValueSetUrl(IBaseResource valueSet) {
|
||||
try {
|
||||
// Use FHIR R4 reflection to extract url from ValueSet resource
|
||||
if (valueSet instanceof org.hl7.fhir.r4.model.ValueSet vs) {
|
||||
return vs.getUrl();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.debug("Could not extract ValueSet URL: {}", e.getMessage());
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private CodeValidationResult valid(String code, String display) {
|
||||
return new CodeValidationResult()
|
||||
.setCode(code)
|
||||
.setDisplay(display)
|
||||
.setSeverity(IssueSeverity.INFORMATION);
|
||||
}
|
||||
|
||||
private CodeValidationResult invalid(String message, String system, String code) {
|
||||
return new CodeValidationResult()
|
||||
.setSeverity(IssueSeverity.ERROR)
|
||||
.setMessage(message)
|
||||
.setCode(code)
|
||||
.setCodeSystemName(system);
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Inner classes
|
||||
// =========================================================================
|
||||
|
||||
/** Cache entry holding validation result and creation timestamp. */
|
||||
private static final class CacheEntry {
|
||||
final boolean valid;
|
||||
final String display; // non-null if valid
|
||||
final String invalidReason; // non-null if invalid
|
||||
final long createdEpochSeconds;
|
||||
|
||||
private CacheEntry(boolean valid, String display, String invalidReason) {
|
||||
this.valid = valid;
|
||||
this.display = display;
|
||||
this.invalidReason = invalidReason;
|
||||
this.createdEpochSeconds = Instant.now().getEpochSecond();
|
||||
}
|
||||
|
||||
static CacheEntry valid(String display) {
|
||||
return new CacheEntry(true, display, null);
|
||||
}
|
||||
|
||||
static CacheEntry invalid(String reason) {
|
||||
return new CacheEntry(false, null, reason);
|
||||
}
|
||||
|
||||
boolean isExpired(long ttlSeconds) {
|
||||
return (Instant.now().getEpochSecond() - createdEpochSeconds) > ttlSeconds;
|
||||
}
|
||||
}
|
||||
|
||||
/** Exception for OCL timeout scenarios */
|
||||
private static class OclTimeoutException extends RuntimeException {
|
||||
OclTimeoutException(String message) { super(message); }
|
||||
}
|
||||
|
||||
/** Exception for OCL HTTP 5xx responses */
|
||||
private static class OclServerErrorException extends RuntimeException {
|
||||
final int statusCode;
|
||||
OclServerErrorException(int statusCode) {
|
||||
super("OCL server error: " + statusCode);
|
||||
this.statusCode = statusCode;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Cache statistics snapshot for the admin endpoint.
 *
 * @param totalEntries   entries currently in the cache (live + expired)
 * @param expiredEntries entries whose TTL has elapsed but are not yet evicted
 * @param ttlSeconds     configured time-to-live applied to every entry
 */
public record CacheStats(int totalEntries, long expiredEntries, long ttlSeconds) {}
|
||||
}
|
||||
@@ -0,0 +1,182 @@
|
||||
package bd.gov.dghs.fhir.terminology;
|
||||
|
||||
import bd.gov.dghs.fhir.interceptor.KeycloakJwtInterceptor;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.http.MediaType;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.web.bind.annotation.DeleteMapping;
|
||||
import org.springframework.web.bind.annotation.GetMapping;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
|
||||
import jakarta.servlet.http.HttpServletRequest;
|
||||
import java.time.Instant;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Admin REST controller exposing the terminology cache management endpoint.
|
||||
*
|
||||
* <h2>Endpoints</h2>
|
||||
* <pre>
|
||||
* DELETE /admin/terminology/cache
|
||||
* Flushes the ICD-11 validation cache.
|
||||
* Required role: fhir-admin (NOT mci-api)
|
||||
* Called by: ICD-11 version upgrade pipeline after OCL import completes.
|
||||
* See: ops/version-upgrade-integration.md
|
||||
*
|
||||
* GET /admin/terminology/cache/stats
|
||||
* Returns current cache statistics.
|
||||
* Required role: fhir-admin
|
||||
* </pre>
|
||||
*
|
||||
* <h2>Security</h2>
|
||||
* <p>Both endpoints require the {@code fhir-admin} Keycloak role.
|
||||
* The {@link KeycloakJwtInterceptor} enforces authentication and the
|
||||
* {@code mci-api} role for FHIR resource endpoints. For admin endpoints,
|
||||
* this controller performs an additional role check for {@code fhir-admin}.
|
||||
*
|
||||
* <p>The cache flush endpoint is a denial-of-service vector if unauthenticated:
|
||||
* an attacker repeatedly flushing the cache forces 50,000+ cold OCL
|
||||
* {@code $validate-code} calls per flush cycle. The {@code fhir-admin}
|
||||
* role requirement is the primary protection. Additionally, the endpoint
|
||||
* is rate-limited at nginx (see nginx.conf — /admin/ location block).
|
||||
*
|
||||
* <h2>Upgrade pipeline integration</h2>
|
||||
* <p>The version upgrade pipeline calls this endpoint after completing:
|
||||
* <ol>
|
||||
* <li>OCL ICD-11 import</li>
|
||||
* <li>concept_class patch for Diagnosis + Finding concepts</li>
|
||||
* <li>bd-condition-icd11-diagnosis-valueset repopulation</li>
|
||||
* </ol>
|
||||
* Order is critical: the cache must be flushed AFTER OCL has the new codes,
|
||||
* not before. Flushing before OCL import completes means validation calls
|
||||
* hit OCL with the old version and repopulate the cache with stale results,
|
||||
* negating the purpose of the flush.
|
||||
*
|
||||
* @see BdTerminologyValidationSupport#flushCache()
|
||||
* @see ops/version-upgrade-integration.md
|
||||
*/
|
||||
@RestController
|
||||
@RequestMapping("/admin/terminology")
|
||||
public class TerminologyCacheManager {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(TerminologyCacheManager.class);
|
||||
|
||||
private final BdTerminologyValidationSupport terminologySupport;
|
||||
|
||||
public TerminologyCacheManager(BdTerminologyValidationSupport terminologySupport) {
|
||||
this.terminologySupport = terminologySupport;
|
||||
}
|
||||
|
||||
/**
|
||||
* Flushes the ICD-11 terminology validation cache.
|
||||
*
|
||||
* <p>After this call returns 200, the next validation request for every
|
||||
* ICD-11 code will hit OCL directly. The cache will repopulate organically
|
||||
* as vendors submit resources. There is no pre-warming mechanism — the cache
|
||||
* is demand-driven.
|
||||
*
|
||||
* <p>This endpoint is idempotent: calling it multiple times has the same
|
||||
* effect as calling it once. The upgrade pipeline may call it safely on
|
||||
* retry without side effects.
|
||||
*
|
||||
* @return 200 with flush summary if successful
|
||||
* @return 403 if caller does not have {@code fhir-admin} role
|
||||
*/
|
||||
@DeleteMapping(value = "/cache", produces = MediaType.APPLICATION_JSON_VALUE)
|
||||
public ResponseEntity<Map<String, Object>> flushCache(HttpServletRequest request) {
|
||||
// Enforce fhir-admin role — this is a separate check from the
|
||||
// KeycloakJwtInterceptor mci-api check. The interceptor allows any
|
||||
// authenticated mci-api request through to the FHIR endpoints.
|
||||
// Admin endpoints require a different, more privileged role.
|
||||
if (!hasAdminRole(request)) {
|
||||
log.warn("Cache flush rejected: caller lacks fhir-admin role. " +
|
||||
"clientId={}", getClientId(request));
|
||||
return ResponseEntity.status(403).body(Map.of(
|
||||
"error", "Forbidden",
|
||||
"message", "Cache flush requires fhir-admin role",
|
||||
"timestamp", Instant.now().toString()
|
||||
));
|
||||
}
|
||||
|
||||
String clientId = getClientId(request);
|
||||
String requestId = getRequestId(request);
|
||||
|
||||
log.info("Terminology cache flush initiated: clientId={} requestId={}",
|
||||
clientId, requestId);
|
||||
|
||||
int evicted = terminologySupport.flushCache();
|
||||
|
||||
log.info("Terminology cache flush completed: evicted={} clientId={} requestId={}",
|
||||
evicted, clientId, requestId);
|
||||
|
||||
return ResponseEntity.ok(Map.of(
|
||||
"status", "flushed",
|
||||
"entriesEvicted", evicted,
|
||||
"timestamp", Instant.now().toString(),
|
||||
"message", "Terminology cache flushed. Next validation requests " +
|
||||
"will call OCL directly until cache repopulates.",
|
||||
"requestId", requestId != null ? requestId : "unknown"
|
||||
));
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns current cache statistics.
|
||||
*
|
||||
* <p>Useful for the upgrade pipeline to verify cache state before and
|
||||
* after a flush, and for ops to understand cache hit rates.
|
||||
*/
|
||||
@GetMapping(value = "/cache/stats", produces = MediaType.APPLICATION_JSON_VALUE)
|
||||
public ResponseEntity<Map<String, Object>> getCacheStats(HttpServletRequest request) {
|
||||
if (!hasAdminRole(request)) {
|
||||
return ResponseEntity.status(403).body(Map.of(
|
||||
"error", "Forbidden",
|
||||
"message", "Cache stats require fhir-admin role"
|
||||
));
|
||||
}
|
||||
|
||||
BdTerminologyValidationSupport.CacheStats stats = terminologySupport.getCacheStats();
|
||||
|
||||
return ResponseEntity.ok(Map.of(
|
||||
"totalEntries", stats.totalEntries(),
|
||||
"expiredEntries", stats.expiredEntries(),
|
||||
"liveEntries", stats.totalEntries() - stats.expiredEntries(),
|
||||
"cacheTtlSeconds", stats.ttlSeconds(),
|
||||
"timestamp", Instant.now().toString()
|
||||
));
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Private helpers — extract JWT claims set by KeycloakJwtInterceptor
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* Checks whether the request has the {@code fhir-admin} role.
|
||||
*
|
||||
* <p>{@link KeycloakJwtInterceptor} validates the JWT and stores extracted
|
||||
* claims as request attributes. The admin role claim is stored under the key
|
||||
* {@code BD_FHIR_IS_ADMIN}. This avoids re-parsing the JWT in this controller.
|
||||
*
|
||||
* <p>If the JWT interceptor has not run (e.g., request came through a path
|
||||
* not intercepted by HAPI), the attribute will be absent and this returns
|
||||
* {@code false} — fail closed.
|
||||
*/
|
||||
private boolean hasAdminRole(HttpServletRequest request) {
|
||||
Object adminFlag = request.getAttribute(
|
||||
KeycloakJwtInterceptor.REQUEST_ATTR_IS_ADMIN);
|
||||
return Boolean.TRUE.equals(adminFlag);
|
||||
}
|
||||
|
||||
private String getClientId(HttpServletRequest request) {
|
||||
Object clientId = request.getAttribute(
|
||||
KeycloakJwtInterceptor.REQUEST_ATTR_CLIENT_ID);
|
||||
return clientId != null ? clientId.toString() : "unknown";
|
||||
}
|
||||
|
||||
private String getRequestId(HttpServletRequest request) {
|
||||
Object requestId = request.getAttribute(
|
||||
KeycloakJwtInterceptor.REQUEST_ATTR_REQUEST_ID);
|
||||
return requestId != null ? requestId.toString() : null;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,483 @@
|
||||
package bd.gov.dghs.fhir.validator;
|
||||
|
||||
import ca.uhn.fhir.rest.api.server.RequestDetails;
|
||||
import ca.uhn.fhir.rest.server.exceptions.UnprocessableEntityException;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import org.apache.hc.client5.http.classic.methods.HttpPost;
|
||||
import org.apache.hc.client5.http.config.RequestConfig;
|
||||
import org.apache.hc.client5.http.impl.classic.CloseableHttpClient;
|
||||
import org.apache.hc.client5.http.impl.classic.HttpClients;
|
||||
import org.apache.hc.core5.http.ClassicHttpResponse;
|
||||
import org.apache.hc.core5.http.ContentType;
|
||||
import org.apache.hc.core5.http.io.entity.StringEntity;
|
||||
import org.apache.hc.core5.util.Timeout;
|
||||
import org.hl7.fhir.r4.model.CodeableConcept;
|
||||
import org.hl7.fhir.r4.model.Coding;
|
||||
import org.hl7.fhir.r4.model.Condition;
|
||||
import org.hl7.fhir.r4.model.Extension;
|
||||
import org.hl7.fhir.r4.model.OperationOutcome;
|
||||
import org.hl7.fhir.r4.model.Resource;
|
||||
import org.hl7.fhir.r4.model.StringType;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import jakarta.annotation.PostConstruct;
|
||||
import jakarta.annotation.PreDestroy;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Validates ICD-11 postcoordinated cluster expressions in FHIR resources.
|
||||
*
|
||||
* <h2>BD Core IG Cluster Expression Pattern</h2>
|
||||
* <p>BD Core IG prohibits raw postcoordinated strings in {@code Coding.code}.
|
||||
* Cluster expressions MUST be represented as:
|
||||
* <pre>
|
||||
* "code": {
|
||||
* "coding": [{
|
||||
* "system": "http://id.who.int/icd/release/11/mms",
|
||||
* "code": "XY9Z", <-- stem code (validated by OCL)
|
||||
* "extension": [{
|
||||
* "url": "icd11-cluster-expression",
|
||||
* "valueString": "XY9Z&has_manifestation=AB12" <-- full expression
|
||||
* }]
|
||||
* }]
|
||||
* }
|
||||
* </pre>
|
||||
*
|
||||
* <h2>Validation logic</h2>
|
||||
* <p>For each {@code Coding} element with system = ICD-11 MMS:
|
||||
* <ol>
|
||||
* <li>Check if {@code icd11-cluster-expression} extension is present.</li>
|
||||
* <li>If present: extract stem code and cluster expression. Call cluster
|
||||
* validator to validate the full expression. Both OCL stem validation
|
||||
* (handled by {@link BdTerminologyValidationSupport}) AND cluster
|
||||
* expression validation must pass. If cluster validation fails: 422.</li>
|
||||
* <li>If absent: check if {@code Coding.code} contains postcoordination
|
||||
* syntax characters ({@code &}, {@code /}, {@code %}). If yes: reject
|
||||
* with 422 — raw postcoordinated strings without the extension are
|
||||
* explicitly prohibited by BD Core IG. If no: plain stem code, no
|
||||
* cluster validation required.</li>
|
||||
* </ol>
|
||||
*
|
||||
* <h2>Integration with OCL validation</h2>
|
||||
* <p>This validator is invoked AFTER OCL has validated the stem code.
|
||||
* If OCL rejects the stem code, the resource is already rejected before
|
||||
* this class is called. This validator handles the additional cluster
|
||||
* expression validation layer.
|
||||
*
|
||||
* <p>This class is invoked by {@link bd.gov.dghs.fhir.interceptor.AuditEventInterceptor}
|
||||
* as a pre-storage hook, after the HAPI RequestValidatingInterceptor has run
|
||||
* profile + OCL validation.
|
||||
*/
|
||||
@Component
|
||||
public class ClusterExpressionValidator {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(ClusterExpressionValidator.class);
|
||||
|
||||
/** ICD-11 MMS system URI */
|
||||
private static final String ICD11_SYSTEM = "http://id.who.int/icd/release/11/mms";
|
||||
|
||||
/** Extension URL that marks a Coding as containing a cluster expression */
|
||||
private static final String CLUSTER_EXT_URL = "icd11-cluster-expression";
|
||||
|
||||
/**
|
||||
* Characters that indicate a raw postcoordinated expression in Coding.code.
|
||||
* BD Core IG prohibits these in Coding.code without the cluster extension.
|
||||
*/
|
||||
private static final char[] POSTCOORD_CHARS = {'&', '/', '%'};
|
||||
|
||||
@Value("${bd.fhir.cluster-validator.url}")
|
||||
private String clusterValidatorUrl;
|
||||
|
||||
@Value("${bd.fhir.cluster-validator.timeout-seconds}")
|
||||
private int timeoutSeconds;
|
||||
|
||||
private CloseableHttpClient httpClient;
|
||||
private final ObjectMapper objectMapper = new ObjectMapper();
|
||||
|
||||
@PostConstruct
|
||||
public void initialise() {
|
||||
RequestConfig config = RequestConfig.custom()
|
||||
.setConnectionRequestTimeout(Timeout.ofSeconds(timeoutSeconds))
|
||||
.setResponseTimeout(Timeout.ofSeconds(timeoutSeconds))
|
||||
.build();
|
||||
|
||||
this.httpClient = HttpClients.custom()
|
||||
.setDefaultRequestConfig(config)
|
||||
.setMaxConnTotal(10)
|
||||
.setMaxConnPerRoute(10)
|
||||
.evictExpiredConnections()
|
||||
.evictIdleConnections(Timeout.ofMinutes(5))
|
||||
.build();
|
||||
|
||||
log.info("ClusterExpressionValidator initialised: url={}, timeoutSeconds={}",
|
||||
clusterValidatorUrl, timeoutSeconds);
|
||||
}
|
||||
|
||||
@PreDestroy
|
||||
public void shutdown() {
|
||||
try {
|
||||
if (httpClient != null) httpClient.close();
|
||||
} catch (IOException e) {
|
||||
log.warn("Error closing cluster validator HttpClient: {}", e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Public API — called by AuditEventInterceptor
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* Validates all ICD-11 coded elements in a resource for cluster expressions.
|
||||
*
|
||||
* <p>This method is a no-op for resource types that do not contain ICD-11
|
||||
* coded elements (e.g., Patient, Practitioner). It only performs validation
|
||||
* for resource types that carry {@code CodeableConcept} elements with ICD-11
|
||||
* codings.
|
||||
*
|
||||
* @param resource the resource being submitted
|
||||
* @param requestDetails HAPI request context (for element path reporting)
|
||||
* @throws UnprocessableEntityException with FHIR OperationOutcome if
|
||||
* cluster expression validation fails
|
||||
*/
|
||||
public void validateResource(Resource resource, RequestDetails requestDetails) {
|
||||
List<ClusterValidationTarget> targets = extractTargets(resource);
|
||||
|
||||
if (targets.isEmpty()) {
|
||||
return; // No ICD-11 coded elements — nothing to validate
|
||||
}
|
||||
|
||||
List<OperationOutcome.OperationOutcomeIssueComponent> issues = new ArrayList<>();
|
||||
|
||||
for (ClusterValidationTarget target : targets) {
|
||||
validateTarget(target, issues);
|
||||
}
|
||||
|
||||
if (!issues.isEmpty()) {
|
||||
throw buildUnprocessableEntityException(issues);
|
||||
}
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Target extraction — identify all ICD-11 Coding elements in resource
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* Extracts all ICD-11 Coding elements from a resource that need cluster validation.
|
||||
*
|
||||
* <p>Currently handles: Condition.code
|
||||
* Extend this method as BD Core IG adds cluster expression support to
|
||||
* other resource types (e.g., Observation.code, Procedure.code).
|
||||
*/
|
||||
private List<ClusterValidationTarget> extractTargets(Resource resource) {
|
||||
List<ClusterValidationTarget> targets = new ArrayList<>();
|
||||
|
||||
if (resource instanceof Condition condition) {
|
||||
extractFromCodeableConcept(
|
||||
condition.getCode(), "Condition.code", targets);
|
||||
}
|
||||
|
||||
// Future: add Observation.code, Procedure.code, etc. here as
|
||||
// BD Core IG expands cluster expression support to other profiles.
|
||||
|
||||
return targets;
|
||||
}
|
||||
|
||||
/**
 * Walks every Coding in one CodeableConcept and appends the ICD-11 codings
 * that need cluster-level validation to {@code targets}.
 *
 * <p>Classification per ICD-11 coding:
 * <ul>
 *   <li>cluster extension present with a value → CLUSTER_EXPRESSION target</li>
 *   <li>cluster extension present but blank → EMPTY_EXTENSION target</li>
 *   <li>no extension, but code contains postcoordination characters →
 *       RAW_POSTCOORD target (prohibited by BD Core IG)</li>
 *   <li>plain stem code → no target; nothing to validate here</li>
 * </ul>
 *
 * @param codeableConcept element to inspect; null/empty is a no-op
 * @param fhirPath        FHIRPath prefix used in issue reporting
 * @param targets         output list; entries are appended, never removed
 */
private void extractFromCodeableConcept(
        CodeableConcept codeableConcept,
        String fhirPath,
        List<ClusterValidationTarget> targets) {

    if (codeableConcept == null || codeableConcept.isEmpty()) {
        return;
    }

    // Index-based loop so the coding index appears in the reported path.
    for (int i = 0; i < codeableConcept.getCoding().size(); i++) {
        Coding coding = codeableConcept.getCoding().get(i);
        String codingPath = fhirPath + ".coding[" + i + "]";

        if (!ICD11_SYSTEM.equals(coding.getSystem())) {
            continue; // Not ICD-11 — skip
        }

        String code = coding.getCode();
        if (code == null || code.isBlank()) {
            continue; // No code — let profile validation handle this
        }

        // Check for cluster extension
        Extension clusterExt = findClusterExtension(coding);

        if (clusterExt != null) {
            // Cluster extension present — extract expression and validate
            String clusterExpression = extractClusterExpression(clusterExt);
            if (clusterExpression == null || clusterExpression.isBlank()) {
                // Extension present but empty — reject
                targets.add(new ClusterValidationTarget(
                        code, null, codingPath,
                        ClusterValidationTarget.Type.EMPTY_EXTENSION));
            } else {
                targets.add(new ClusterValidationTarget(
                        code, clusterExpression, codingPath,
                        ClusterValidationTarget.Type.CLUSTER_EXPRESSION));
            }
        } else {
            // No cluster extension — check if code looks like raw postcoordination
            if (containsPostcoordinationChars(code)) {
                // Raw postcoordinated string without extension — prohibited
                targets.add(new ClusterValidationTarget(
                        code, null, codingPath,
                        ClusterValidationTarget.Type.RAW_POSTCOORD));
            }
            // Plain stem code — no cluster validation needed
        }
    }
}
|
||||
|
||||
// =========================================================================
|
||||
// Validation execution
|
||||
// =========================================================================
|
||||
|
||||
/**
 * Converts one classified target into zero or more OperationOutcome issues.
 *
 * <p>RAW_POSTCOORD and EMPTY_EXTENSION are rejected locally with a fixed
 * message (no external call); CLUSTER_EXPRESSION targets are delegated to
 * the cluster validator middleware.
 *
 * @param target classified ICD-11 coding to validate
 * @param issues output list; any validation failures are appended
 */
private void validateTarget(
        ClusterValidationTarget target,
        List<OperationOutcome.OperationOutcomeIssueComponent> issues) {

    switch (target.type) {
        case RAW_POSTCOORD -> {
            // Reject immediately — no external call needed
            log.info("Rejected raw postcoordinated ICD-11 code without " +
                    "icd11-cluster-expression extension: path={} code={}",
                    target.fhirPath, target.stemCode);
            issues.add(buildIssue(
                    OperationOutcome.IssueSeverity.ERROR,
                    OperationOutcome.IssueType.BUSINESSRULE,
                    "ICD-11 postcoordinated expression in " + target.fhirPath +
                    " must use the icd11-cluster-expression extension. " +
                    "Raw postcoordinated strings in Coding.code are prohibited " +
                    "by BD Core IG. Found code: '" + target.stemCode + "'. " +
                    "The stem code must be in Coding.code and the full cluster " +
                    "expression must be in the icd11-cluster-expression extension.",
                    target.fhirPath + ".code"));
        }

        case EMPTY_EXTENSION -> {
            issues.add(buildIssue(
                    OperationOutcome.IssueSeverity.ERROR,
                    OperationOutcome.IssueType.REQUIRED,
                    "icd11-cluster-expression extension at " + target.fhirPath +
                    " is present but contains no value. " +
                    "The extension valueString must contain the full " +
                    "ICD-11 cluster expression.",
                    target.fhirPath + ".extension[url=icd11-cluster-expression].valueString"));
        }

        case CLUSTER_EXPRESSION -> {
            // Call cluster validator middleware
            validateClusterExpression(target, issues);
        }
    }
}
|
||||
|
||||
/**
|
||||
* Calls the cluster validator middleware to validate a postcoordinated expression.
|
||||
*
|
||||
* <p>The cluster validator validates the syntactic and semantic correctness
|
||||
* of the full postcoordinated cluster expression. OCL validates the stem code
|
||||
* (handled upstream). Both must pass.
|
||||
*
|
||||
* <p>Request body format (application/json):
|
||||
* <pre>
|
||||
* {
|
||||
* "stemCode": "XY9Z",
|
||||
* "clusterExpression": "XY9Z&has_manifestation=AB12",
|
||||
* "system": "http://id.who.int/icd/release/11/mms"
|
||||
* }
|
||||
* </pre>
|
||||
*
|
||||
* <p>Response format on success (200):
|
||||
* <pre>
|
||||
* { "valid": true }
|
||||
* </pre>
|
||||
*
|
||||
* <p>Response format on failure (200 with valid=false, or 422):
|
||||
* <pre>
|
||||
* { "valid": false, "reason": "Invalid axis: has_manifestation requires..." }
|
||||
* </pre>
|
||||
*/
|
||||
private void validateClusterExpression(
        ClusterValidationTarget target,
        List<OperationOutcome.OperationOutcomeIssueComponent> issues) {

    try {
        // JSON request body for the middleware (see javadoc above for format).
        String requestBody = objectMapper.writeValueAsString(Map.of(
                "stemCode", target.stemCode,
                "clusterExpression", target.clusterExpression,
                "system", ICD11_SYSTEM
        ));

        HttpPost post = new HttpPost(clusterValidatorUrl);
        post.setHeader("Content-Type", "application/json");
        post.setHeader("Accept", "application/json");
        post.setEntity(new StringEntity(requestBody, ContentType.APPLICATION_JSON));

        ClusterValidationResponse response = httpClient.execute(
                post, (ClassicHttpResponse httpResponse) -> {
                    int status = httpResponse.getCode();

                    if (status == 200 || status == 422) {
                        // Both statuses carry a JSON verdict body; 422 is a
                        // definitive rejection (handled in parseClusterResponse).
                        byte[] body = httpResponse.getEntity().getContent().readAllBytes();
                        return parseClusterResponse(body, status);
                    } else if (status >= 500) {
                        // Cluster validator server error — fail-open
                        // Document the same fail-open policy as OCL
                        log.warn("Cluster validator server error: status={} " +
                                "stemCode={} expression={} — accepting (fail-open)",
                                status, target.stemCode, target.clusterExpression);
                        return ClusterValidationResponse.failOpen(
                                "Cluster validator unavailable (HTTP " + status + ")");
                    } else {
                        // Any other status (1xx/3xx/other 4xx) — also fail-open.
                        log.warn("Unexpected cluster validator status: {}", status);
                        return ClusterValidationResponse.failOpen(
                                "Unexpected cluster validator response: " + status);
                    }
                });

        if (!response.valid && !response.failOpen) {
            // Definitive rejection — surface as a CODEINVALID issue.
            log.info("Cluster expression rejected: path={} stemCode={} " +
                    "expression={} reason={}",
                    target.fhirPath, target.stemCode,
                    target.clusterExpression, response.reason);

            issues.add(buildIssue(
                    OperationOutcome.IssueSeverity.ERROR,
                    OperationOutcome.IssueType.CODEINVALID,
                    "ICD-11 cluster expression at " + target.fhirPath +
                    " failed validation. " +
                    "Stem code: '" + target.stemCode + "'. " +
                    "Expression: '" + target.clusterExpression + "'. " +
                    "Reason: " + (response.reason != null ? response.reason :
                    "Expression is not a valid ICD-11 postcoordinated cluster."),
                    target.fhirPath + ".extension[url=icd11-cluster-expression].valueString"));
        } else if (response.failOpen) {
            log.warn("Cluster validation skipped (fail-open): path={} stemCode={}",
                    target.fhirPath, target.stemCode);
            // Resource accepted despite cluster validator unavailability.
            // The AuditEventInterceptor will log this as a warning.
        } else {
            log.debug("Cluster expression valid: path={} stemCode={}",
                    target.fhirPath, target.stemCode);
        }

    } catch (Exception e) {
        // Connection failure — fail-open (same policy as OCL)
        log.error("Cluster validator connection failed: stemCode={} expression={} error={}",
                target.stemCode, target.clusterExpression, e.getMessage());
        // Do not add to issues — fail-open
    }
}
|
||||
|
||||
// =========================================================================
|
||||
// Helpers
|
||||
// =========================================================================
|
||||
|
||||
private Extension findClusterExtension(Coding coding) {
|
||||
return coding.getExtension().stream()
|
||||
.filter(ext -> CLUSTER_EXT_URL.equals(ext.getUrl()))
|
||||
.findFirst()
|
||||
.orElse(null);
|
||||
}
|
||||
|
||||
private String extractClusterExpression(Extension extension) {
|
||||
if (extension.getValue() instanceof StringType st) {
|
||||
return st.getValue();
|
||||
}
|
||||
// Handle other value types gracefully
|
||||
return extension.getValue() != null ? extension.getValue().toString() : null;
|
||||
}
|
||||
|
||||
private boolean containsPostcoordinationChars(String code) {
|
||||
for (char c : POSTCOORD_CHARS) {
|
||||
if (code.indexOf(c) >= 0) return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private ClusterValidationResponse parseClusterResponse(byte[] body, int statusCode) {
|
||||
try {
|
||||
var node = objectMapper.readTree(body);
|
||||
boolean valid = node.path("valid").asBoolean(false);
|
||||
String reason = node.path("reason").asText(null);
|
||||
|
||||
if (statusCode == 422) {
|
||||
// Explicit rejection from cluster validator
|
||||
return new ClusterValidationResponse(false, false,
|
||||
reason != null ? reason : "Cluster expression rejected by validator");
|
||||
}
|
||||
return new ClusterValidationResponse(valid, false, reason);
|
||||
} catch (Exception e) {
|
||||
log.warn("Could not parse cluster validator response: {}", e.getMessage());
|
||||
return ClusterValidationResponse.failOpen("Could not parse validator response");
|
||||
}
|
||||
}
|
||||
|
||||
private OperationOutcome.OperationOutcomeIssueComponent buildIssue(
|
||||
OperationOutcome.IssueSeverity severity,
|
||||
OperationOutcome.IssueType type,
|
||||
String diagnostics,
|
||||
String expression) {
|
||||
|
||||
var issue = new OperationOutcome.OperationOutcomeIssueComponent();
|
||||
issue.setSeverity(severity);
|
||||
issue.setCode(type);
|
||||
issue.setDiagnostics(diagnostics);
|
||||
if (expression != null) {
|
||||
issue.addExpression(expression);
|
||||
}
|
||||
return issue;
|
||||
}
|
||||
|
||||
private UnprocessableEntityException buildUnprocessableEntityException(
|
||||
List<OperationOutcome.OperationOutcomeIssueComponent> issues) {
|
||||
|
||||
OperationOutcome oo = new OperationOutcome();
|
||||
issues.forEach(oo::addIssue);
|
||||
|
||||
return new UnprocessableEntityException(
|
||||
ca.uhn.fhir.context.FhirContext.forR4(),
|
||||
oo);
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Inner classes
|
||||
// =========================================================================
|
||||
|
||||
    /**
     * A single ICD-11 Coding element, extracted from the incoming resource,
     * that requires cluster validation.
     *
     * @param stemCode          the ICD-11 stem code of the Coding
     * @param clusterExpression the postcoordinated cluster expression, when present
     * @param fhirPath          FHIRPath locating the Coding within the resource
     * @param type              how this target should be handled (see {@link Type})
     */
    private record ClusterValidationTarget(
            String stemCode,
            String clusterExpression,
            String fhirPath,
            Type type) {

        // Dispatch category decided during target extraction.
        enum Type {
            CLUSTER_EXPRESSION, // has extension with valid value — call cluster validator
            RAW_POSTCOORD,      // has postcoord chars in code, no extension — reject immediately
            EMPTY_EXTENSION     // has extension but empty value — reject immediately
        }
    }
    /**
     * Outcome of a cluster-validation attempt against the validator middleware.
     *
     * @param valid    true when the expression was accepted (always true for
     *                 fail-open responses)
     * @param failOpen true when the validator could not be consulted and the
     *                 resource is accepted by policy rather than by validation
     * @param reason   human-readable explanation; may be null
     */
    private record ClusterValidationResponse(
            boolean valid,
            boolean failOpen,
            String reason) {

        /** Accept-by-policy response used when the validator is unavailable. */
        static ClusterValidationResponse failOpen(String reason) {
            return new ClusterValidationResponse(true, true, reason);
        }
    }
}
|
||||
534
hapi-overlay/src/main/resources/application.yaml
Normal file
534
hapi-overlay/src/main/resources/application.yaml
Normal file
@@ -0,0 +1,534 @@
|
||||
# =============================================================================
# BD FHIR National — application.yaml
# Spring Boot 3.2.x + HAPI FHIR 7.2.0
#
# ALL secrets and environment-specific values come from environment variables.
# No secret value appears in this file — only ${VARIABLE_NAME} references.
# This file is safe to commit to version control.
#
# Profile: prod (set via SPRING_PROFILES_ACTIVE=prod in docker-compose)
# Overrides can be placed in application-prod.yaml for prod-only values,
# but this single file covers all required configuration.
# =============================================================================

# -----------------------------------------------------------------------------
# SERVER
# -----------------------------------------------------------------------------
server:
  port: 8080

  # Graceful shutdown: allow in-flight requests to complete before the
  # container stops. Docker stop sends SIGTERM → Spring waits up to the
  # grace period for active requests to finish → then shuts down the JVM.
  # tini in the Dockerfile ensures SIGTERM is forwarded correctly to the JVM.
  shutdown: graceful

  # Tomcat connector tuning
  tomcat:
    # Max threads: number of concurrent HTTP requests HAPI can process.
    # Each FHIR validation call is CPU-bound (IG profile check) + IO-bound
    # (OCL $validate-code call). With OCL timeout=10s and 5 DB connections
    # per replica, 20 threads is the correct ceiling at pilot scale.
    # Increasing beyond DB pool size causes thread queueing, not parallelism.
    threads:
      max: 20
      min-spare: 5
    # Connection timeout: reject connections held open without sending data.
    connection-timeout: 20000
    # Accept count: queue depth when all threads are busy.
    # 50 is generous for pilot scale. Requests beyond this get TCP RST.
    accept-count: 50

  # Servlet context path — HAPI registers its own /fhir path internally.
  # Do not set a deeper context-path; HAPI's RestfulServer manages /fhir.
  servlet:
    context-path: /

# NOTE(review): Spring Boot binds the graceful-shutdown grace period at
# spring.lifecycle.timeout-per-shutdown-phase. A top-level "lifecycle" key is
# not a known Spring property and would be silently ignored — verify this key
# is actually nested under `spring:` in the deployed file.
lifecycle:
  # Must match server.shutdown: graceful
  timeout-per-shutdown-phase: 30s
# -----------------------------------------------------------------------------
# SPRING CORE
# -----------------------------------------------------------------------------
spring:
  application:
    name: bd-fhir-hapi

  profiles:
    active: ${SPRING_PROFILES_ACTIVE:prod}

  # ---------------------------------------------------------------------------
  # PRIMARY DATASOURCE — FHIR store (postgres-fhir via pgBouncer)
  # ---------------------------------------------------------------------------
  datasource:
    url: ${SPRING_DATASOURCE_URL}
    username: ${SPRING_DATASOURCE_USERNAME}
    password: ${SPRING_DATASOURCE_PASSWORD}
    driver-class-name: org.postgresql.Driver

    hikari:
      pool-name: ${SPRING_DATASOURCE_HIKARI_POOL_NAME:fhir-pool}
      maximum-pool-size: ${SPRING_DATASOURCE_HIKARI_MAXIMUM_POOL_SIZE:5}
      minimum-idle: ${SPRING_DATASOURCE_HIKARI_MINIMUM_IDLE:2}
      connection-timeout: ${SPRING_DATASOURCE_HIKARI_CONNECTION_TIMEOUT:30000}
      idle-timeout: ${SPRING_DATASOURCE_HIKARI_IDLE_TIMEOUT:600000}
      max-lifetime: ${SPRING_DATASOURCE_HIKARI_MAX_LIFETIME:1800000}

      # pgBouncer session-mode compatibility: autoCommit=true is safe with
      # session mode — pgBouncer maintains session state between requests.
      auto-commit: true

      # Connection test: connectionTestQuery is deliberately unset so
      # HikariCP uses JDBC4 isValid(), which is more efficient for the
      # PostgreSQL driver.

      # Leak detection: warn if a connection is held for >60s — almost
      # certainly a hung OCL call or an uncommitted transaction.
      # Set to 0 in dev to disable.
      leak-detection-threshold: 60000

      data-source-properties:
        # Server-side prepared statements. Safe with pgBouncer session mode;
        # would break under transaction mode — another reason session mode
        # is mandatory here.
        prepareThreshold: 5
        # Batch rewrite for inserts — improves search-index write throughput.
        reWriteBatchedInserts: true
        # Application name visible in pg_stat_activity.
        ApplicationName: bd-fhir-hapi
        # Socket timeout (seconds): fail fast if PostgreSQL becomes
        # unreachable; otherwise Hikari waits indefinitely on a dead socket.
        socketTimeout: 30

  # ---------------------------------------------------------------------------
  # JPA / HIBERNATE
  # ---------------------------------------------------------------------------
  jpa:
    # CRITICAL: validate — Hibernate checks the schema matches entities but
    # does NOT create or alter tables. Flyway owns all DDL.
    # Never use create, create-drop, or update in production.
    hibernate:
      ddl-auto: validate

    properties:
      hibernate:
        # PostgreSQL dialect for Hibernate 6 (bundled with Spring Boot 3.2)
        dialect: org.hibernate.dialect.PostgreSQLDialect

        # Batch writes for search-parameter indexing: HAPI inserts many
        # HFJ_SPIDX_* rows per resource; batch size 50 cuts round-trips ~50x.
        jdbc:
          batch_size: 50
          order_inserts: true
          order_updates: true
          # Fetch size for result-set streaming — prevents OOM on large searches
          fetch_size: 100

        # Release the JDBC connection back to HikariCP after each transaction
        # instead of holding it for the Session lifetime. Critical for pool
        # efficiency with HAPI's long-lived EntityManager sessions.
        connection:
          handling_mode: DELAYED_ACQUISITION_AND_RELEASE_AFTER_TRANSACTION

        # L2 cache disabled: HAPI manages its own caches; enabling Hibernate
        # L2 cache causes cache-coherence issues across replicas.
        cache:
          use_second_level_cache: false
          use_query_cache: false

        # SQL logging/formatting off in prod — DEBUG SQL would log patient
        # data to container stdout.
        format_sql: false
        show_sql: false

        # Statistics off in prod; enable temporarily for tuning via
        # LOGGING_LEVEL_ORG_HIBERNATE_STAT=DEBUG.
        generate_statistics: false

    # Prevent lazy loading outside transactions — HAPI manages its own
    # transaction boundaries.
    open-in-view: false

  # ---------------------------------------------------------------------------
  # FLYWAY — FHIR schema (V1__hapi_schema.sql)
  # Connects directly to postgres-fhir, bypassing pgBouncer.
  # ---------------------------------------------------------------------------
  flyway:
    enabled: true
    url: ${SPRING_FLYWAY_URL}
    user: ${SPRING_FLYWAY_USER}
    password: ${SPRING_FLYWAY_PASSWORD}
    # V1 migration only — the audit schema has its own Flyway instance
    # (see DataSourceConfig.java).
    locations: classpath:db/migration/fhir
    # Baseline on migrate: if the DB already has tables (previous manual
    # setup) Flyway baselines instead of failing. Keep false after the first
    # successful migration.
    baseline-on-migrate: false
    # Checksum-validate applied migrations — fails startup if V1 was edited
    # after being applied, which is the correct behaviour.
    validate-on-migrate: true
    # Migrations must run in version order.
    out-of-order: false
    # Flyway's own metadata table name.
    table: flyway_schema_history
    # Do not mix transactional and non-transactional statements.
    mixed: false

  # ---------------------------------------------------------------------------
  # JACKSON — JSON serialisation
  # HAPI serialises FHIR resources with its own FHIR serialiser, not Jackson.
  # Jackson covers: actuator endpoints, audit log payloads, OCL/cluster
  # validator response parsing, admin REST responses.
  # ---------------------------------------------------------------------------
  jackson:
    serialization:
      write-dates-as-timestamps: false
      indent-output: false
    deserialization:
      fail-on-unknown-properties: false
    default-property-inclusion: non_null
    time-zone: UTC

  # ---------------------------------------------------------------------------
  # TASK EXECUTION — Spring's async executor
  # Used by @Async methods: AuditEventEmitter writes audit records
  # asynchronously so they do not block the FHIR request thread.
  # ---------------------------------------------------------------------------
  task:
    execution:
      pool:
        core-size: 4
        max-size: 10
        queue-capacity: 500
        keep-alive: 60s
      # Thread name prefix — visible in thread dumps and profilers
      thread-name-prefix: audit-async-

    scheduling:
      pool:
        size: 2
      thread-name-prefix: scheduled-
# -----------------------------------------------------------------------------
# HAPI FHIR CONFIGURATION
# Read by HAPI's Spring Boot auto-configuration.
# Reference: https://hapifhir.io/hapi-fhir/docs/server_jpa/configuration.html
# -----------------------------------------------------------------------------
hapi:
  fhir:
    # FHIR version — must be R4 for BD Core IG 0.2.1
    fhir-version: R4

    # Server base URL — must match the nginx proxy_pass destination and the
    # URL published in the BD Core IG CapabilityStatement.
    server-address: ${HAPI_FHIR_SERVER_ADDRESS:https://fhir.dghs.gov.bd/fhir}

    # Appears in CapabilityStatement.software
    server-name: "BD FHIR National Repository"
    server-version: "1.0.0"

    # -------------------------------------------------------------------------
    # VALIDATION — enforce on ALL requests, no exceptions
    # -------------------------------------------------------------------------
    validation:
      # Validate every incoming resource before storage — the primary
      # enforcement point for BD Core IG.
      enabled: true
      # Skip response validation: outgoing resources were already validated
      # on ingestion; re-validating on read adds latency with no benefit.
      response-enabled: false
      # NOTE(review): "request-only" is described here as header validation;
      # confirm this key is actually consumed by the HAPI starter in use.
      request-only: false

    # -------------------------------------------------------------------------
    # STORAGE
    # -------------------------------------------------------------------------
    # NOTE(review): the original comment said batch deletes are "needed for
    # admin operations" but the value is false — confirm which is intended.
    allow-multiple-delete: false
    # Allow external references: true — BD Core IG uses canonical URLs
    allow-external-references: true
    # Reuse cached search results for repeated identical queries
    reuse-cached-search-results-millis: 60000
    # Search paging
    default-page-size: 20
    max-page-size: 200
    # Retain cached searches for 1 hour
    expire-search-results-after-millis: 3600000

    # -------------------------------------------------------------------------
    # RESOURCE TYPES
    # Only R4 resource types in scope for the BD national deployment.
    # Unknown resource types are accepted with the unvalidated-profile meta
    # tag (handled in FhirServerConfig.java).
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # NARRATIVE GENERATION — disabled
    # BD vendors provide their own narratives or none at all; generating
    # narratives server-side adds CPU overhead with no clinical value.
    # -------------------------------------------------------------------------
    narrative-enabled: false

    # -------------------------------------------------------------------------
    # CORS — disabled at HAPI level, handled at nginx level
    # -------------------------------------------------------------------------
    cors:
      enabled: false

    # -------------------------------------------------------------------------
    # SUBSCRIPTIONS — disabled for pilot phase
    # Enable in Phase 2 when real-time notification requirements are defined.
    # -------------------------------------------------------------------------
    subscription:
      resthook-enabled: false
      websocket-enabled: false
      email-enabled: false

    # -------------------------------------------------------------------------
    # BULK EXPORT — enabled for DGHS analytics
    # -------------------------------------------------------------------------
    bulk-export-enabled: true

    # -------------------------------------------------------------------------
    # PARTITIONING — disabled (single-tenant national deployment)
    # -------------------------------------------------------------------------
    partitioning:
      partitioning-enabled: false

    # -------------------------------------------------------------------------
    # ADVANCED LUCENE INDEXING — disabled
    # Hibernate Search / Lucene full-text search is not required for BD Core
    # IG search parameters (all token/date/reference) and adds significant
    # memory overhead and startup time.
    # -------------------------------------------------------------------------
    advanced-lucene-indexing: false

    # -------------------------------------------------------------------------
    # TERMINOLOGY
    # Primary terminology validation is handled by
    # BdTerminologyValidationSupport (custom class). This setting controls
    # HAPI's built-in terminology cache, which runs alongside our custom
    # 24h-TTL cache — kept small so the custom cache is the effective one.
    # -------------------------------------------------------------------------
    terminology-cache-size: 1000
# -----------------------------------------------------------------------------
# AUDIT DATASOURCE
# Custom datasource — not managed by Spring Boot auto-configuration.
# Configured programmatically in DataSourceConfig.java; the environment
# variables are read there, this section documents the expected layout.
# -----------------------------------------------------------------------------
audit:
  datasource:
    url: ${AUDIT_DATASOURCE_URL}
    username: ${AUDIT_DATASOURCE_USERNAME}
    password: ${AUDIT_DATASOURCE_PASSWORD}
    hikari:
      pool-name: ${AUDIT_DATASOURCE_HIKARI_POOL_NAME:audit-pool}
      maximum-pool-size: ${AUDIT_DATASOURCE_HIKARI_MAXIMUM_POOL_SIZE:2}
      minimum-idle: ${AUDIT_DATASOURCE_HIKARI_MINIMUM_IDLE:1}
      # Short connection timeout (5s vs 30s on the FHIR pool) — audit
      # failures should fail fast rather than block FHIR request processing.
      connection-timeout: 5000
      idle-timeout: 300000
      max-lifetime: 900000
      auto-commit: true
      data-source-properties:
        ApplicationName: bd-fhir-hapi-audit
        socketTimeout: 10
  flyway:
    url: ${AUDIT_FLYWAY_URL}
    user: ${AUDIT_FLYWAY_USER}
    password: ${AUDIT_FLYWAY_PASSWORD}
    locations: classpath:db/migration/audit
    table: flyway_audit_schema_history
# -----------------------------------------------------------------------------
# OCL TERMINOLOGY SERVICE
# Configuration for BdTerminologyValidationSupport.
# All values injected as environment variables.
# -----------------------------------------------------------------------------
bd:
  fhir:
    ocl:
      base-url: ${HAPI_OCL_BASE_URL:https://tr.ocl.dghs.gov.bd/api/fhir}
      timeout-seconds: ${HAPI_OCL_TIMEOUT_SECONDS:10}
      retry-attempts: ${HAPI_OCL_RETRY_ATTEMPTS:2}
      # ICD-11 MMS system URI — must match what BD Core IG profiles declare
      icd11-system: http://id.who.int/icd/release/11/mms
      # BD Condition ValueSet canonical URL
      condition-valueset-url: https://fhir.dghs.gov.bd/core/ValueSet/bd-condition-icd11-diagnosis-valueset

    cluster-validator:
      url: ${HAPI_CLUSTER_VALIDATOR_URL:https://icd11.dghs.gov.bd/cluster/validate}
      timeout-seconds: ${HAPI_CLUSTER_VALIDATOR_TIMEOUT_SECONDS:10}
      # Extension URL that marks a Coding as carrying a cluster expression.
      # NOTE(review): this is a relative identifier; confirm the interceptor
      # compares against this exact string and not a full canonical URL.
      extension-url: icd11-cluster-expression
      # ICD-11 system URI — same as above, repeated for cluster validator context
      icd11-system: http://id.who.int/icd/release/11/mms

    keycloak:
      issuer: ${KEYCLOAK_ISSUER:https://auth.dghs.gov.bd/realms/hris}
      jwks-url: ${KEYCLOAK_JWKS_URL:https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/certs}
      required-role: ${KEYCLOAK_REQUIRED_ROLE:mci-api}
      admin-role: ${KEYCLOAK_ADMIN_ROLE:fhir-admin}
      jwks-cache-ttl-seconds: ${KEYCLOAK_JWKS_CACHE_TTL_SECONDS:3600}

    ig:
      package-classpath: ${HAPI_IG_PACKAGE_CLASSPATH:classpath:packages/bd.gov.dghs.core-0.2.1.tgz}
      version: ${HAPI_IG_VERSION:0.2.1}

    terminology:
      # 24-hour cache TTL for validated ICD-11 codes (in seconds)
      cache-ttl-seconds: ${HAPI_TERMINOLOGY_CACHE_TTL_SECONDS:86400}

      # Unknown resource type handling: resources with types not profiled in
      # BD Core IG are stored with meta.tag = unvalidated-profile
      # (see FhirServerConfig.java).
      unvalidated-profile-tag-system: https://fhir.dghs.gov.bd/tags
      unvalidated-profile-tag-code: unvalidated-profile
# -----------------------------------------------------------------------------
# ACTUATOR — health, info, metrics
# -----------------------------------------------------------------------------
management:
  endpoints:
    web:
      # Only expose endpoints needed by the load balancer and ops team.
      # Never expose env, beans, or mappings in production — they leak
      # configuration including partially-masked secrets.
      exposure:
        include:
          - health
          - info
          - metrics
          - prometheus
          - loggers
      base-path: /actuator

  endpoint:
    health:
      # Show details only to authenticated requests.
      # The load balancer hits /actuator/health/liveness — no auth needed
      # for liveness.
      show-details: when-authorized
      show-components: when-authorized
      # Separate liveness and readiness probes.
      # liveness: is the JVM alive? (load balancer uses this)
      # readiness: is the application ready to serve traffic?
      #   waits for: Flyway migrations complete, IG loaded,
      #   OCL connectivity verified.
      probes:
        enabled: true
      group:
        liveness:
          include:
            - livenessState
            - ping
        readiness:
          include:
            - readinessState
            - db        # FHIR datasource
            - auditDb   # Audit datasource (custom indicator)
            - ocl       # OCL reachability (custom indicator)

    info:
      enabled: true
    metrics:
      enabled: true
    prometheus:
      enabled: true
    loggers:
      enabled: true

  # Health indicator configuration
  health:
    # Default DataSourceHealthIndicator is kept for the FHIR datasource only
    # (hapi_app user can SELECT 1). The INSERT-only audit datasource gets a
    # custom indicator instead (SELECT 1 would fail for audit_writer).
    db:
      enabled: true
    # Disk space check is not useful in a container environment.
    diskspace:
      enabled: false
    defaults:
      enabled: true

  # Metrics
  metrics:
    tags:
      # Common tags on all metrics — useful for filtering in Prometheus/Grafana
      application: bd-fhir-hapi
      environment: ${SPRING_PROFILES_ACTIVE:prod}

  # Spring Boot 3.x: the Prometheus registry toggle moved from
  # management.metrics.export.prometheus.enabled (removed in 3.0) to
  # management.prometheus.metrics.export.enabled.
  prometheus:
    metrics:
      export:
        enabled: true

  # Info endpoint contributors
  info:
    env:
      enabled: true
    build:
      enabled: true
    git:
      enabled: true
      mode: simple
# Application metadata surfaced at /actuator/info
info:
  application:
    name: BD FHIR National Repository
    description: National FHIR R4 repository and validation engine
    ig-version: ${HAPI_IG_VERSION:0.2.1}
    fhir-version: R4
    hapi-version: 7.2.0
# -----------------------------------------------------------------------------
# LOGGING
# Structured JSON output for ELK ingestion.
# Log levels controlled via environment variables in docker-compose.
# IMPORTANT: Never log at DEBUG in production — FHIR resources contain
# patient data. DEBUG logs in HAPI output full resource JSON.
# -----------------------------------------------------------------------------
logging:
  level:
    root: ${LOGGING_LEVEL_ROOT:WARN}
    bd.gov.dghs: ${LOGGING_LEVEL_BD_GOV_DGHS:INFO}
    bd.gov.dghs.fhir.interceptor: ${LOGGING_LEVEL_BD_GOV_DGHS_FHIR_INTERCEPTOR:INFO}
    bd.gov.dghs.fhir.terminology: ${LOGGING_LEVEL_BD_GOV_DGHS_FHIR_TERMINOLOGY:INFO}
    bd.gov.dghs.fhir.validator: ${LOGGING_LEVEL_BD_GOV_DGHS_FHIR_VALIDATOR:INFO}
    ca.uhn.hapi.fhir: ${LOGGING_LEVEL_CA_UHN_HAPI:WARN}
    org.springframework: ${LOGGING_LEVEL_ORG_SPRINGFRAMEWORK:WARN}
    org.springframework.web: WARN
    org.hibernate: WARN
    org.hibernate.SQL: WARN
    # Flyway: INFO to see migration progress on startup
    org.flywaydb: INFO
    # HikariCP: WARN unless debugging pool exhaustion
    com.zaxxer.hikari: WARN
    # Nimbus JWT: WARN unless debugging token validation
    com.nimbusds: WARN

  pattern:
    # Structured JSON logging via logstash-logback-encoder (configured in
    # logback-spring.xml). This pattern is the fallback for non-JSON output.
    console: "%d{yyyy-MM-dd'T'HH:mm:ss.SSSZ} [%thread] %-5level %logger{36} - %msg%n"

  # Log file — written to a Docker volume for Filebeat pickup
  file:
    name: /app/logs/bd-fhir-hapi.log

  # Spring Boot 3.x: file-rotation properties moved to
  # logging.logback.rollingpolicy.* (logging.file.max-size / max-history /
  # total-size-cap were deprecated in 2.x and removed in 3.0).
  logback:
    rollingpolicy:
      max-file-size: 100MB
      max-history: 7
      total-size-cap: 1GB
@@ -0,0 +1,632 @@
|
||||
-- =============================================================================
|
||||
-- V2__audit_schema.sql
|
||||
-- Audit Schema — PostgreSQL 15
|
||||
--
|
||||
-- Creates:
|
||||
-- audit.audit_events — FHIR AuditEvent records (partitioned by month)
|
||||
-- audit.fhir_rejected_submissions — Rejected resource payloads (partitioned by month)
|
||||
-- audit.health_check — Used by AuditDataSourceHealthIndicator
|
||||
-- audit.schema_version — Schema version tracking
|
||||
--
|
||||
-- Partitioning strategy:
|
||||
-- Both main tables use PARTITION BY RANGE (event_time) with monthly partitions.
|
||||
-- Partitions are pre-created for current year + 2 years forward.
|
||||
-- A maintenance job (cron) must create next-month partitions before month rollover.
|
||||
-- See ops/scaling-roadmap.md for partition maintenance instructions.
|
||||
--
|
||||
-- Security:
|
||||
-- Role audit_writer has INSERT only on audit schema.
|
||||
-- Role audit_reader has SELECT only (for DGHS analytics queries).
|
||||
-- HAPI JVM connects as audit_writer via datasource.audit.
|
||||
-- No UPDATE, DELETE, TRUNCATE granted to any application role.
|
||||
-- Only a DBA superuser can modify or delete audit records.
|
||||
--
|
||||
-- IMMUTABILITY NOTE:
|
||||
-- Application roles cannot UPDATE or DELETE rows.
|
||||
-- PostgreSQL row-level security is NOT used here — immutability is
|
||||
-- enforced entirely through GRANT/REVOKE at the schema level.
|
||||
-- For stronger guarantees, consider pg_audit extension or
|
||||
-- logical replication to an append-only replica.
|
||||
-- =============================================================================
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- SCHEMA
-- ---------------------------------------------------------------------------

CREATE SCHEMA IF NOT EXISTS audit;

-- ---------------------------------------------------------------------------
-- ROLES
-- Created here; passwords set via environment variable at runtime.
-- If roles already exist (re-run scenario), skip creation.
-- NOLOGIN: these are group roles; actual login users are granted
-- membership outside this migration.
-- ---------------------------------------------------------------------------

DO $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'audit_writer') THEN
        CREATE ROLE audit_writer NOLOGIN;
    END IF;
    IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'audit_reader') THEN
        CREATE ROLE audit_reader NOLOGIN;
    END IF;
END
$$;

-- Grant schema usage (USAGE is required before any table-level privilege
-- in the schema is effective)
GRANT USAGE ON SCHEMA audit TO audit_writer;
GRANT USAGE ON SCHEMA audit TO audit_reader;
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- SCHEMA VERSION
-- Simple metadata table, not partitioned, not audited.
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS audit.schema_version (
    version     VARCHAR(20) NOT NULL,
    applied_at  TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    description TEXT,
    CONSTRAINT PK_audit_schema_version PRIMARY KEY (version)
);

-- Idempotent seed: ON CONFLICT DO NOTHING makes re-runs a no-op
INSERT INTO audit.schema_version (version, description)
VALUES ('2.0.0', 'Initial audit schema with monthly partitioning')
ON CONFLICT DO NOTHING;
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- HEALTH CHECK TABLE
-- Used exclusively by AuditDataSourceHealthIndicator.
-- INSERT ... ON CONFLICT DO NOTHING avoids growing this table.
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS audit.health_check (
    check_id   VARCHAR(36) NOT NULL,
    checked_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    CONSTRAINT PK_audit_health_check PRIMARY KEY (check_id)
);

-- Seed one row so the health check INSERT can use ON CONFLICT
INSERT INTO audit.health_check (check_id)
VALUES ('00000000-0000-0000-0000-000000000000')
ON CONFLICT DO NOTHING;

-- ON CONFLICT DO NOTHING reads no column values, so INSERT privilege
-- alone is sufficient for the health-check probe
GRANT INSERT ON audit.health_check TO audit_writer;
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- AUDIT EVENTS — partitioned by month
--
-- Columns:
--   event_id        — UUID, generated by application
--   event_time      — UTC timestamp (partition key)
--   event_type      — OPERATION | AUTH_FAILURE | VALIDATION_FAILURE
--   operation       — CREATE | UPDATE | DELETE | READ
--   resource_type   — Patient | Condition | Encounter | etc.
--   resource_id     — FHIR logical ID (may be null for rejected resources)
--   resource_version— FHIR version number (null for rejected)
--   outcome         — ACCEPTED | REJECTED
--   outcome_detail  — Human-readable rejection reason
--   sending_facility— Extracted from Keycloak token claim
--   client_id       — Keycloak client_id (fhir-vendor-{org-id})
--   subject         — Keycloak sub (service account user ID)
--   request_ip      — Client IP address from X-Forwarded-For or RemoteAddr
--   request_id      — Random UUID assigned per HTTP request for correlation
--   validation_messages — JSONB array of OperationOutcome issues
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS audit.audit_events (
    event_id            UUID NOT NULL,
    event_time          TIMESTAMP WITH TIME ZONE NOT NULL,
    event_type          VARCHAR(30) NOT NULL
        CHECK (event_type IN ('OPERATION','AUTH_FAILURE','VALIDATION_FAILURE','TERMINOLOGY_FAILURE','CLUSTER_FAILURE')),
    -- nullable: AUTH_FAILURE events have no FHIR operation
    operation           VARCHAR(10)
        CHECK (operation IN ('CREATE','UPDATE','DELETE','READ','PATCH')),
    resource_type       VARCHAR(40),
    resource_id         VARCHAR(64),
    resource_version    BIGINT,
    outcome             VARCHAR(10) NOT NULL
        CHECK (outcome IN ('ACCEPTED','REJECTED')),
    outcome_detail      TEXT,
    sending_facility    VARCHAR(200),
    client_id           VARCHAR(200) NOT NULL,
    subject             VARCHAR(200) NOT NULL,
    request_ip          VARCHAR(45),   -- 45 chars: longest textual IPv6 form
    request_id          VARCHAR(36),
    validation_messages JSONB,
    -- Partition key must be included in primary key for partitioned tables
    CONSTRAINT PK_audit_events PRIMARY KEY (event_id, event_time)
) PARTITION BY RANGE (event_time);

-- Indexes on the parent table — PostgreSQL 11+ propagates to partitions
CREATE INDEX IF NOT EXISTS IDX_AE_CLIENT_ID
    ON audit.audit_events (client_id, event_time DESC);
CREATE INDEX IF NOT EXISTS IDX_AE_FACILITY
    ON audit.audit_events (sending_facility, event_time DESC);
-- Partial indexes: keep NULL-resource and ACCEPTED rows out of the index
CREATE INDEX IF NOT EXISTS IDX_AE_RESOURCE
    ON audit.audit_events (resource_type, resource_id, event_time DESC)
    WHERE resource_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS IDX_AE_OUTCOME
    ON audit.audit_events (outcome, event_time DESC)
    WHERE outcome = 'REJECTED';
CREATE INDEX IF NOT EXISTS IDX_AE_REQUEST_ID
    ON audit.audit_events (request_id)
    WHERE request_id IS NOT NULL;
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- REJECTED SUBMISSIONS — partitioned by month
--
-- Stores full rejected resource payload for forensic purposes.
-- Vendors can request their rejected submissions from DGHS for debugging.
-- DO NOT expose this table directly via API — only through DGHS admin tools.
--
-- Columns:
--   submission_id   — UUID, generated by application
--   submission_time — UTC timestamp (partition key)
--   event_id        — FK to audit.audit_events (same UUID). NOTE: this is a
--                     logical reference only — no FOREIGN KEY constraint is
--                     declared on this column in this schema.
--   resource_type   — FHIR resource type
--   resource_payload— Full JSON payload as submitted (before any modification)
--   rejection_code  — Machine-readable rejection code
--   rejection_reason— Human-readable rejection reason
--   element_path    — FHIRPath expression of the violating element
--   violated_profile— URL of the violated profile constraint
--   invalid_code    — The invalid code value (for terminology rejections)
--   invalid_system  — The code system of the invalid code
--   sending_facility— Extracted from Keycloak token claim
--   client_id       — Keycloak client_id
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS audit.fhir_rejected_submissions (
    submission_id    UUID NOT NULL,
    submission_time  TIMESTAMP WITH TIME ZONE NOT NULL,
    event_id         UUID NOT NULL,
    resource_type    VARCHAR(40),
    resource_payload TEXT NOT NULL,   -- full JSON, not JSONB — preserve exact bytes
    rejection_code   VARCHAR(50) NOT NULL
        CHECK (rejection_code IN (
            'PROFILE_VIOLATION',
            'TERMINOLOGY_INVALID_CODE',
            'TERMINOLOGY_INVALID_CLASS',
            'CLUSTER_EXPRESSION_INVALID',
            'CLUSTER_STEM_MISSING_EXTENSION',
            'AUTH_TOKEN_MISSING',
            'AUTH_TOKEN_EXPIRED',
            'AUTH_TOKEN_INVALID_SIGNATURE',
            'AUTH_TOKEN_MISSING_ROLE',
            'AUTH_TOKEN_INVALID_ISSUER'
        )),
    rejection_reason TEXT NOT NULL,
    element_path     VARCHAR(500),
    violated_profile VARCHAR(500),
    invalid_code     VARCHAR(200),
    invalid_system   VARCHAR(200),
    sending_facility VARCHAR(200),
    client_id        VARCHAR(200) NOT NULL,
    -- Partition key in PK (required for partitioned tables)
    CONSTRAINT PK_fhir_rejected_submissions PRIMARY KEY (submission_id, submission_time)
) PARTITION BY RANGE (submission_time);

-- Indexes on the parent — propagated to all partitions (PostgreSQL 11+)
CREATE INDEX IF NOT EXISTS IDX_RS_CLIENT_ID
    ON audit.fhir_rejected_submissions (client_id, submission_time DESC);
CREATE INDEX IF NOT EXISTS IDX_RS_REJECTION_CODE
    ON audit.fhir_rejected_submissions (rejection_code, submission_time DESC);
CREATE INDEX IF NOT EXISTS IDX_RS_FACILITY
    ON audit.fhir_rejected_submissions (sending_facility, submission_time DESC);
-- Correlation lookup from an audit_events row to its stored payload
CREATE INDEX IF NOT EXISTS IDX_RS_EVENT_ID
    ON audit.fhir_rejected_submissions (event_id);
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- MONTHLY PARTITIONS — pre-created for 2025-2027
--
-- Naming convention: audit_events_YYYY_MM / fhir_rejected_submissions_YYYY_MM
--
-- MAINTENANCE REQUIREMENT:
--   Create next month's partition BEFORE the 1st of each month.
--   Failing to do so causes INSERT to fail with:
--     "no partition of relation ... found for row"
--   Add to cron on the audit PostgreSQL host:
--     0 0 20 * * psql -U postgres -d auditdb -c "SELECT audit.create_next_month_partitions();"
--   See ops/scaling-roadmap.md for the partition maintenance function.
-- ---------------------------------------------------------------------------

-- Generate the 36 monthly partitions per table (Jan 2025 .. Dec 2027) in a
-- single loop instead of 72 hand-written CREATE TABLE statements. The
-- hand-written form is error-prone: one mistyped range boundary silently
-- leaves a gap (INSERT fails at rollover) or an overlap (CREATE fails).
DO $$
DECLARE
    month_start DATE;
    month_end   DATE;
    suffix      TEXT;
BEGIN
    FOR month_start IN
        SELECT generate_series(DATE '2025-01-01', DATE '2027-12-01',
                               INTERVAL '1 month')::DATE
    LOOP
        month_end := (month_start + INTERVAL '1 month')::DATE;
        suffix    := TO_CHAR(month_start, 'YYYY_MM');

        -- %I.%I schema-qualifies the child so it always lands in "audit",
        -- independent of the session search_path.
        EXECUTE format(
            'CREATE TABLE IF NOT EXISTS %I.%I PARTITION OF audit.audit_events '
            'FOR VALUES FROM (%L) TO (%L)',
            'audit', 'audit_events_' || suffix,
            month_start::TEXT, month_end::TEXT);

        EXECUTE format(
            'CREATE TABLE IF NOT EXISTS %I.%I PARTITION OF audit.fhir_rejected_submissions '
            'FOR VALUES FROM (%L) TO (%L)',
            'audit', 'fhir_rejected_submissions_' || suffix,
            month_start::TEXT, month_end::TEXT);
    END LOOP;
END
$$;
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- GRANTS — INSERT only for audit_writer, SELECT only for audit_reader
-- Applied to the parent tables. Access routed THROUGH the parent (the only
-- path the application uses) is checked against the parent's privileges,
-- so partition-level grants are not needed; direct access to individual
-- partitions remains ungranted.
-- ---------------------------------------------------------------------------

-- audit_writer: INSERT only — no SELECT, UPDATE, DELETE, TRUNCATE.
-- This is the immutability guarantee described in the file header.
GRANT INSERT ON audit.audit_events TO audit_writer;
GRANT INSERT ON audit.fhir_rejected_submissions TO audit_writer;

-- audit_reader: SELECT only — for DGHS analytics and admin tools
GRANT SELECT ON audit.audit_events TO audit_reader;
GRANT SELECT ON audit.fhir_rejected_submissions TO audit_reader;
GRANT SELECT ON audit.schema_version TO audit_reader;

-- Sequences: audit_writer does not need sequence access because
-- event_id and submission_id are UUID generated by the application,
-- not database sequences.
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- PARTITION MAINTENANCE FUNCTION
-- Callable by the cron job to create next month's partitions.
-- Run on the 20th of each month to create the following month's partition.
-- ---------------------------------------------------------------------------

CREATE OR REPLACE FUNCTION audit.create_next_month_partitions()
RETURNS void
LANGUAGE plpgsql
SECURITY DEFINER             -- runs as the function owner (postgres superuser)
SET search_path = pg_catalog -- pin search_path: required hygiene for SECURITY DEFINER
AS $$
DECLARE
    next_month  DATE;
    month_after DATE;
    month_str   TEXT;
    parent      TEXT;
    child       TEXT;
BEGIN
    next_month  := DATE_TRUNC('month', NOW()) + INTERVAL '1 month';
    month_after := next_month + INTERVAL '1 month';
    month_str   := TO_CHAR(next_month, 'YYYY_MM');

    -- Identical logic for both partitioned tables — loop instead of the
    -- original copy-pasted branches.
    FOREACH parent IN ARRAY ARRAY['audit_events', 'fhir_rejected_submissions'] LOOP
        child := parent || '_' || month_str;

        IF to_regclass(format('%I.%I', 'audit', child)) IS NULL THEN
            -- BUG FIX: the new partition must be schema-qualified. The
            -- original executed 'CREATE TABLE %I ...' with an UNQUALIFIED
            -- name, so the partition was created in the first writable
            -- schema on search_path (typically public) — the existence
            -- check above (which looks in schema "audit") then never found
            -- it, and every subsequent monthly run failed with
            -- "relation already exists".
            EXECUTE format(
                'CREATE TABLE %I.%I PARTITION OF %I.%I FOR VALUES FROM (%L) TO (%L)',
                'audit', child, 'audit', parent,
                next_month::TEXT, month_after::TEXT);
            RAISE NOTICE 'Created partition: audit.%', child;
        ELSE
            RAISE NOTICE 'Partition already exists: audit.%', child;
        END IF;
    END LOOP;
END;
$$;

-- SECURITY DEFINER functions are executable by PUBLIC by default — revoke,
-- so only explicitly granted roles (audit_maintainer) may call it.
REVOKE EXECUTE ON FUNCTION audit.create_next_month_partitions() FROM PUBLIC;
|
||||
|
||||
-- Grant execute to a maintenance role (not to audit_writer) — the HAPI
-- application role must not be able to run DDL-creating functions.
DO $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'audit_maintainer') THEN
        -- NOLOGIN group role; the cron job's login user is granted
        -- membership outside this migration.
        CREATE ROLE audit_maintainer NOLOGIN;
    END IF;
END
$$;

GRANT EXECUTE ON FUNCTION audit.create_next_month_partitions() TO audit_maintainer;
|
||||
|
||||
-- ---------------------------------------------------------------------------
|
||||
-- VERIFICATION QUERIES (run manually after migration to confirm correctness)
|
||||
-- ---------------------------------------------------------------------------
|
||||
|
||||
-- Confirm partition count (should be 36 per table for 2025-2027):
|
||||
-- SELECT COUNT(*) FROM pg_inherits i
|
||||
-- JOIN pg_class p ON p.oid = i.inhparent
|
||||
-- JOIN pg_namespace n ON n.oid = p.relnamespace
|
||||
-- WHERE n.nspname = 'audit' AND p.relname = 'audit_events';
|
||||
|
||||
-- Confirm INSERT-only grant for audit_writer:
|
||||
-- SELECT grantee, table_name, privilege_type
|
||||
-- FROM information_schema.role_table_grants
|
||||
-- WHERE table_schema = 'audit' AND grantee = 'audit_writer';
|
||||
-- Expected: only INSERT rows, no SELECT/UPDATE/DELETE.
|
||||
|
||||
-- Test partition routing:
|
||||
-- INSERT INTO audit.audit_events (event_id, event_time, event_type, outcome, client_id, subject)
|
||||
-- VALUES (gen_random_uuid(), NOW(), 'OPERATION', 'ACCEPTED', 'test', 'test');
|
||||
-- SELECT tableoid::regclass, event_time FROM audit.audit_events LIMIT 1;
|
||||
-- Should show audit.audit_events_YYYY_MM matching current month.
|
||||
@@ -0,0 +1,722 @@
|
||||
-- =============================================================================
|
||||
-- V1__hapi_schema.sql
|
||||
-- HAPI FHIR 7.2.0 JPA Schema — PostgreSQL 15
|
||||
--
|
||||
-- MAINTAINER WARNING:
|
||||
-- This file is the authoritative schema for the HAPI JPA store.
|
||||
-- It was derived from HAPI 7.2.0 Hibernate entity mappings.
|
||||
-- DO NOT MODIFY this file after it has run in any environment.
|
||||
-- If HAPI is upgraded, write V3__hapi_schema_upgrade_X_Y_Z.sql.
|
||||
--
|
||||
-- To verify this schema matches a new HAPI version:
|
||||
-- 1. Stand up HAPI with ddl-auto=create against a clean DB
|
||||
-- 2. Dump schema: pg_dump --schema-only
|
||||
-- 3. Diff against this file
|
||||
-- 4. Write incremental migration for any differences
|
||||
--
|
||||
-- PARTITIONING NOTE:
|
||||
-- HAPI JPA tables are NOT partitioned in this migration.
|
||||
-- Partition candidates at 10M+ resources:
|
||||
-- - HFJ_RESOURCE (partition by RES_TYPE or RES_UPDATED)
|
||||
-- - HFJ_RES_VER (partition by RES_UPDATED)
|
||||
-- - HFJ_SPIDX_STRING (partition by SP_UPDATED)
|
||||
-- - HFJ_SPIDX_TOKEN (partition by SP_UPDATED)
|
||||
-- - HFJ_SPIDX_DATE (partition by SP_LOW_VALUE)
|
||||
-- At <10,000 resources/day (pilot phase), PostgreSQL B-tree indexes
|
||||
-- are sufficient. Re-evaluate at 5M total resources.
|
||||
-- =============================================================================
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- SEQUENCES
-- ID generators for every HAPI JPA table below. INCREMENT BY 50 matches
-- Hibernate's pooled allocation size, so the application can hand out IDs
-- in batches without a round-trip per row.
-- ---------------------------------------------------------------------------

CREATE SEQUENCE IF NOT EXISTS hfj_resource_seq                 START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_res_ver_seq                  START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_history_tag_seq              START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_res_tag_seq                  START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_forced_id_seq                START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_search_seq                   START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_search_result_seq            START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_subscription_seq             START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_tag_def_seq                  START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_res_link_seq                 START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_spidx_string_seq             START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_spidx_token_seq              START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_spidx_number_seq             START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_spidx_quantity_seq           START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_spidx_date_seq               START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_spidx_uri_seq                START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_spidx_coords_seq             START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_concept_seq                  START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_concept_map_seq              START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_concept_map_grp_seq          START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_concept_map_grp_elm_seq      START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_concept_map_grp_elm_tgt_seq  START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_valueset_seq                 START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_valueset_concept_seq         START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS npm_package_seq                  START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS npm_package_ver_seq              START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS npm_package_ver_res_seq          START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_batch2_job_inst_seq          START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_batch2_wrkchunk_seq          START WITH 1 INCREMENT BY 50;
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- TAG DEFINITIONS
-- One row per distinct tag value referenced by any resource
-- (security labels, profiles, general tags). TAG_TYPE discriminates
-- the tag category; the (type, system, code) triple is unique.
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS HFJ_TAG_DEF (
    TAG_ID      BIGINT NOT NULL DEFAULT nextval('hfj_tag_def_seq'),
    TAG_CODE    VARCHAR(200) NOT NULL,
    TAG_DISPLAY VARCHAR(200),
    TAG_SYSTEM  VARCHAR(200),
    TAG_TYPE    SMALLINT NOT NULL,
    CONSTRAINT PK_HFJ_TAG_DEF PRIMARY KEY (TAG_ID)
);

-- Dedup guarantee: a given (type, system, code) tag is stored exactly once.
CREATE UNIQUE INDEX IF NOT EXISTS IDX_TAGDEF_TYPESYSCODE
    ON HFJ_TAG_DEF (TAG_TYPE, TAG_SYSTEM, TAG_CODE);
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- CORE RESOURCE TABLE
-- One row per logical resource (not per version); the current version
-- number lives in RES_VER and full version content lives in HFJ_RES_VER.
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS HFJ_RESOURCE (
    RES_ID            BIGINT NOT NULL DEFAULT nextval('hfj_resource_seq'),
    RES_TYPE          VARCHAR(40) NOT NULL,
    RES_VERSION       VARCHAR(7) NOT NULL,   -- FHIR spec version, e.g. R4
    RES_ENCODING_ENUM VARCHAR(11),
    RES_DELETED_AT    TIMESTAMP WITH TIME ZONE,
    RES_LAST_UPDATED  TIMESTAMP WITH TIME ZONE NOT NULL,
    RES_PUBLISHED     TIMESTAMP WITH TIME ZONE NOT NULL,
    RES_VER           BIGINT NOT NULL,       -- current version number
    FHIR_ID           VARCHAR(64),
    HAS_TAGS          BOOLEAN NOT NULL DEFAULT FALSE,
    SP_HAS_LINKS      BOOLEAN NOT NULL DEFAULT FALSE,
    HASH_SHA256       VARCHAR(64),
    RES_TITLE         VARCHAR(200),
    CONSTRAINT PK_HFJ_RESOURCE PRIMARY KEY (RES_ID)
);

CREATE INDEX IF NOT EXISTS IDX_RES_TYPE_FHIRID
    ON HFJ_RESOURCE (RES_TYPE, FHIR_ID);
CREATE INDEX IF NOT EXISTS IDX_RES_UPDATED
    ON HFJ_RESOURCE (RES_LAST_UPDATED);
CREATE INDEX IF NOT EXISTS IDX_RES_TYPE_UPDATED
    ON HFJ_RESOURCE (RES_TYPE, RES_LAST_UPDATED);

-- Partial index over the content hash; rows with no hash are excluded.
-- NOTE(review): the original comment here claimed this index serves
-- `_tag=...|unvalidated-profile` searches, but it covers HASH_SHA256,
-- not tag data — tag search goes through HFJ_RES_TAG/HFJ_TAG_DEF. Confirm
-- intended purpose before relying on it for tag queries.
CREATE INDEX IF NOT EXISTS IDX_RES_HASH
    ON HFJ_RESOURCE (HASH_SHA256)
    WHERE HASH_SHA256 IS NOT NULL;
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- FORCED IDS
-- Maps client-assigned logical IDs (FHIR_ID) to internal numeric PIDs.
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS HFJ_FORCED_ID (
    FORCEDID_ID  BIGINT NOT NULL DEFAULT nextval('hfj_forced_id_seq'),
    RESOURCE_PID BIGINT NOT NULL,
    FHIR_ID      VARCHAR(100) NOT NULL,
    RES_TYPE     VARCHAR(40),
    CONSTRAINT PK_HFJ_FORCED_ID PRIMARY KEY (FORCEDID_ID),
    CONSTRAINT FK_FORCEDID_RESOURCE FOREIGN KEY (RESOURCE_PID)
        REFERENCES HFJ_RESOURCE (RES_ID)
);

-- Each (type, logical id) pair resolves to exactly one resource, and each
-- resource carries at most one forced id.
CREATE UNIQUE INDEX IF NOT EXISTS IDX_FORCEDID_TYPE_FID
    ON HFJ_FORCED_ID (RES_TYPE, FHIR_ID);
CREATE UNIQUE INDEX IF NOT EXISTS IDX_FORCEDID_RESID
    ON HFJ_FORCED_ID (RESOURCE_PID);
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- RESOURCE VERSIONS
-- Full serialized resource content; one row per version per resource.
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS HFJ_RES_VER (
    PID              BIGINT NOT NULL DEFAULT nextval('hfj_res_ver_seq'),
    RES_ID           BIGINT NOT NULL,
    RES_VER          BIGINT NOT NULL,
    RES_TYPE         VARCHAR(40) NOT NULL,
    RES_ENCODING     VARCHAR(11) NOT NULL,
    RES_TEXT         TEXT,
    RES_TEXT_VC      TEXT,
    RES_DELETED_AT   TIMESTAMP WITH TIME ZONE,
    RES_LAST_UPDATED TIMESTAMP WITH TIME ZONE NOT NULL,
    RES_PUBLISHED    TIMESTAMP WITH TIME ZONE NOT NULL,
    SOURCE_URI       VARCHAR(100),
    REQUEST_ID       VARCHAR(16),
    RES_TITLE        VARCHAR(200),
    CONSTRAINT PK_HFJ_RES_VER PRIMARY KEY (PID),
    CONSTRAINT FK_RESVER_RES FOREIGN KEY (RES_ID)
        REFERENCES HFJ_RESOURCE (RES_ID)
);

-- A resource cannot have two rows for the same version number.
CREATE UNIQUE INDEX IF NOT EXISTS IDX_RESVER_ID_VER
    ON HFJ_RES_VER (RES_ID, RES_VER);
CREATE INDEX IF NOT EXISTS IDX_RESVER_UPDATED
    ON HFJ_RES_VER (RES_LAST_UPDATED);
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- RESOURCE TAGS
-- HFJ_RES_TAG links the CURRENT version of a resource to tag definitions;
-- HFJ_HISTORY_TAG records tags per historical version.
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS HFJ_RES_TAG (
    TAG_ID    BIGINT NOT NULL DEFAULT nextval('hfj_res_tag_seq'),
    RES_ID    BIGINT NOT NULL,
    TAG_DEFID BIGINT NOT NULL,
    RES_TYPE  VARCHAR(40),
    CONSTRAINT PK_HFJ_RES_TAG PRIMARY KEY (TAG_ID),
    CONSTRAINT FK_RESTAG_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID),
    CONSTRAINT FK_RESTAG_TAGDEF FOREIGN KEY (TAG_DEFID) REFERENCES HFJ_TAG_DEF (TAG_ID)
);

-- A resource carries a given tag at most once.
CREATE UNIQUE INDEX IF NOT EXISTS IDX_RES_TAG
    ON HFJ_RES_TAG (RES_ID, TAG_DEFID);

CREATE TABLE IF NOT EXISTS HFJ_HISTORY_TAG (
    TAG_ID      BIGINT NOT NULL DEFAULT nextval('hfj_history_tag_seq'),
    RES_VER_PID BIGINT NOT NULL,
    RES_TYPE    VARCHAR(40),
    RES_ID      BIGINT NOT NULL,
    TAG_DEFID   BIGINT NOT NULL,
    CONSTRAINT PK_HFJ_HISTORY_TAG PRIMARY KEY (TAG_ID),
    CONSTRAINT FK_HISTTAG_RESVER FOREIGN KEY (RES_VER_PID) REFERENCES HFJ_RES_VER (PID),
    CONSTRAINT FK_HISTTAG_TAGDEF FOREIGN KEY (TAG_DEFID) REFERENCES HFJ_TAG_DEF (TAG_ID)
);
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- RESOURCE LINKS (reference index)
-- One row per resolved reference from a source resource element (SRC_PATH)
-- to either a local target resource or an external URL.
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS HFJ_RES_LINK (
    PID                  BIGINT NOT NULL DEFAULT nextval('hfj_res_link_seq'),
    SRC_RESOURCE_ID      BIGINT NOT NULL,
    SRC_PATH             VARCHAR(200) NOT NULL,
    TARGET_RESOURCE_ID   BIGINT,             -- null when target is external
    TARGET_RESOURCE_TYPE VARCHAR(40),
    TARGET_RESOURCE_URL  VARCHAR(200),       -- external reference, if any
    UPDATED              TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_USERAGENT         VARCHAR(200),
    CONSTRAINT PK_HFJ_RES_LINK PRIMARY KEY (PID),
    CONSTRAINT FK_RESLINK_SRC FOREIGN KEY (SRC_RESOURCE_ID)
        REFERENCES HFJ_RESOURCE (RES_ID)
);

CREATE INDEX IF NOT EXISTS IDX_RL_TPATHRES ON HFJ_RES_LINK (SRC_PATH, SRC_RESOURCE_ID);
CREATE INDEX IF NOT EXISTS IDX_RL_TARGET ON HFJ_RES_LINK (TARGET_RESOURCE_ID);
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- SEARCH PARAMETER INDEXES
-- One table per FHIR search-parameter data type; HAPI resolves FHIR
-- search queries against these.
-- ---------------------------------------------------------------------------

-- String parameters (name, address, etc.)
CREATE TABLE IF NOT EXISTS HFJ_SPIDX_STRING (
    SP_ID            BIGINT NOT NULL DEFAULT nextval('hfj_spidx_string_seq'),
    RES_ID           BIGINT NOT NULL,
    SP_NAME          VARCHAR(100) NOT NULL,
    RES_TYPE         VARCHAR(40) NOT NULL,
    SP_UPDATED       TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_VALUE_EXACT   VARCHAR(200),
    SP_VALUE_NORM    VARCHAR(200),
    HASH_IDENTITY    BIGINT,
    HASH_EXACT       BIGINT,
    HASH_NORM_PREFIX BIGINT,
    CONSTRAINT PK_HFJ_SPIDX_STRING PRIMARY KEY (SP_ID),
    CONSTRAINT FK_SPIDXSTR_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID)
);

CREATE INDEX IF NOT EXISTS IDX_SP_STRING_HASH_NRM ON HFJ_SPIDX_STRING (HASH_NORM_PREFIX, SP_VALUE_NORM);
CREATE INDEX IF NOT EXISTS IDX_SP_STRING_HASH_EXCT ON HFJ_SPIDX_STRING (HASH_EXACT);
CREATE INDEX IF NOT EXISTS IDX_SP_STRING_RESID ON HFJ_SPIDX_STRING (RES_ID);
|
||||
|
||||
-- Token parameters (code, identifier, status, etc.).
-- Per the original note, this is the hottest index table for coded
-- (e.g. ICD-11) searches.
CREATE TABLE IF NOT EXISTS HFJ_SPIDX_TOKEN (
    SP_ID              BIGINT NOT NULL DEFAULT nextval('hfj_spidx_token_seq'),
    RES_ID             BIGINT NOT NULL,
    SP_NAME            VARCHAR(100) NOT NULL,
    RES_TYPE           VARCHAR(40) NOT NULL,
    SP_UPDATED         TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_SYSTEM          VARCHAR(200),
    SP_VALUE           VARCHAR(200),
    HASH_IDENTITY      BIGINT,
    HASH_SYS_AND_VALUE BIGINT,
    HASH_VALUE         BIGINT,
    HASH_SYSTEM        BIGINT,
    CONSTRAINT PK_HFJ_SPIDX_TOKEN PRIMARY KEY (SP_ID),
    CONSTRAINT FK_SPIDXTOK_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID)
);

CREATE INDEX IF NOT EXISTS IDX_SP_TOKEN_HASH ON HFJ_SPIDX_TOKEN (HASH_IDENTITY);
CREATE INDEX IF NOT EXISTS IDX_SP_TOKEN_HASH_SV ON HFJ_SPIDX_TOKEN (HASH_SYS_AND_VALUE);
CREATE INDEX IF NOT EXISTS IDX_SP_TOKEN_HASH_V ON HFJ_SPIDX_TOKEN (HASH_VALUE);
CREATE INDEX IF NOT EXISTS IDX_SP_TOKEN_RESID ON HFJ_SPIDX_TOKEN (RES_ID);
|
||||
|
||||
-- Date parameters (birthdate, recorded, onset, etc.).
-- A date value is stored as a [low, high] range to support FHIR
-- date-precision semantics.
CREATE TABLE IF NOT EXISTS HFJ_SPIDX_DATE (
    SP_ID         BIGINT NOT NULL DEFAULT nextval('hfj_spidx_date_seq'),
    RES_ID        BIGINT NOT NULL,
    SP_NAME       VARCHAR(100) NOT NULL,
    RES_TYPE      VARCHAR(40) NOT NULL,
    SP_UPDATED    TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_LOW_VALUE  TIMESTAMP WITH TIME ZONE,
    SP_HIGH_VALUE TIMESTAMP WITH TIME ZONE,
    HASH_IDENTITY BIGINT,
    CONSTRAINT PK_HFJ_SPIDX_DATE PRIMARY KEY (SP_ID),
    CONSTRAINT FK_SPIDXDATE_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID)
);

CREATE INDEX IF NOT EXISTS IDX_SP_DATE_HASH ON HFJ_SPIDX_DATE (HASH_IDENTITY);
CREATE INDEX IF NOT EXISTS IDX_SP_DATE_HASH_LOW ON HFJ_SPIDX_DATE (HASH_IDENTITY, SP_LOW_VALUE);
CREATE INDEX IF NOT EXISTS IDX_SP_DATE_HASH_HIGH ON HFJ_SPIDX_DATE (HASH_IDENTITY, SP_HIGH_VALUE);
CREATE INDEX IF NOT EXISTS IDX_SP_DATE_RESID ON HFJ_SPIDX_DATE (RES_ID);
|
||||
|
||||
-- Number parameters
CREATE TABLE IF NOT EXISTS HFJ_SPIDX_NUMBER (
    SP_ID         BIGINT NOT NULL DEFAULT nextval('hfj_spidx_number_seq'),
    RES_ID        BIGINT NOT NULL,
    SP_NAME       VARCHAR(100) NOT NULL,
    RES_TYPE      VARCHAR(40) NOT NULL,
    SP_UPDATED    TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_VALUE      NUMERIC(19,9),
    HASH_IDENTITY BIGINT,
    CONSTRAINT PK_HFJ_SPIDX_NUMBER PRIMARY KEY (SP_ID),
    CONSTRAINT FK_SPIDXNUM_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID)
);

CREATE INDEX IF NOT EXISTS IDX_SP_NUMBER_HASH_VAL ON HFJ_SPIDX_NUMBER (HASH_IDENTITY, SP_VALUE);
|
||||
|
||||
-- Quantity parameters (Observation.value, etc.)
CREATE TABLE IF NOT EXISTS HFJ_SPIDX_QUANTITY (
    SP_ID              BIGINT NOT NULL DEFAULT nextval('hfj_spidx_quantity_seq'),
    RES_ID             BIGINT NOT NULL,
    SP_NAME            VARCHAR(100) NOT NULL,
    RES_TYPE           VARCHAR(40) NOT NULL,
    SP_UPDATED         TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_VALUE           NUMERIC(19,9),
    SP_SYSTEM          VARCHAR(200),
    SP_UNITS           VARCHAR(200),
    HASH_IDENTITY      BIGINT,
    HASH_SYS_UNITS_VAL BIGINT,
    HASH_UNITS_VAL     BIGINT,
    CONSTRAINT PK_HFJ_SPIDX_QUANTITY PRIMARY KEY (SP_ID),
    CONSTRAINT FK_SPIDXQTY_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID)
);

CREATE INDEX IF NOT EXISTS IDX_SP_QUANTITY_HASH ON HFJ_SPIDX_QUANTITY (HASH_IDENTITY);
CREATE INDEX IF NOT EXISTS IDX_SP_QUANTITY_HASH_UN ON HFJ_SPIDX_QUANTITY (HASH_UNITS_VAL);
CREATE INDEX IF NOT EXISTS IDX_SP_QUANTITY_HASH_SUVAL ON HFJ_SPIDX_QUANTITY (HASH_SYS_UNITS_VAL);
|
||||
|
||||
-- URI parameters (url, instantiates, etc.)
CREATE TABLE IF NOT EXISTS HFJ_SPIDX_URI (
    SP_ID         BIGINT NOT NULL DEFAULT nextval('hfj_spidx_uri_seq'),
    RES_ID        BIGINT NOT NULL,
    SP_NAME       VARCHAR(100) NOT NULL,
    RES_TYPE      VARCHAR(40) NOT NULL,
    SP_UPDATED    TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_URI        VARCHAR(254),
    HASH_IDENTITY BIGINT,
    HASH_URI      BIGINT,
    CONSTRAINT PK_HFJ_SPIDX_URI PRIMARY KEY (SP_ID),
    CONSTRAINT FK_SPIDXURI_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID)
);

CREATE INDEX IF NOT EXISTS IDX_SP_URI_HASH_URI ON HFJ_SPIDX_URI (HASH_URI);
CREATE INDEX IF NOT EXISTS IDX_SP_URI_RESID ON HFJ_SPIDX_URI (RES_ID);
|
||||
|
||||
-- Coordinate parameters (Location.position)
CREATE TABLE IF NOT EXISTS HFJ_SPIDX_COORDS (
    SP_ID         BIGINT NOT NULL DEFAULT nextval('hfj_spidx_coords_seq'),
    RES_ID        BIGINT NOT NULL,
    SP_NAME       VARCHAR(100) NOT NULL,
    RES_TYPE      VARCHAR(40) NOT NULL,
    SP_UPDATED    TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_LATITUDE   DOUBLE PRECISION,
    SP_LONGITUDE  DOUBLE PRECISION,
    HASH_IDENTITY BIGINT,
    CONSTRAINT PK_HFJ_SPIDX_COORDS PRIMARY KEY (SP_ID),
    CONSTRAINT FK_SPIDXCOORDS_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID)
);
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- SEARCH RESULTS (paging)
-- Cached searches: one HFJ_SEARCH row per executed search, identified by
-- SEARCH_UUID, with its matched resources in HFJ_SEARCH_RESULT.
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS HFJ_SEARCH (
    PID                      BIGINT NOT NULL DEFAULT nextval('hfj_search_seq'),
    SEARCH_UUID              VARCHAR(36) NOT NULL,
    RESOURCE_TYPE            VARCHAR(200),
    SEARCH_TYPE              SMALLINT NOT NULL,
    SEARCH_STATUS            VARCHAR(10) NOT NULL,
    CREATED                  TIMESTAMP WITH TIME ZONE NOT NULL,
    EXPIRY_OR_NULL           TIMESTAMP WITH TIME ZONE,
    TOTAL_COUNT              INT,
    NUM_FOUND                INT,
    SEARCH_PARAM_MAP         BYTEA,
    SEARCH_LAST_RETURNED     TIMESTAMP WITH TIME ZONE,
    SEARCH_QUERY_STRING      TEXT,
    SEARCH_QUERY_STRING_HASH INT,
    PREFERRED_PAGE_SIZE      INT,
    FAILURE_CODE             INT,
    FAILURE_MESSAGE          TEXT,
    SEARCH_DELETED           BOOLEAN,
    LAST_UPDATED_HIGH        TIMESTAMP WITH TIME ZONE,
    LAST_UPDATED_LOW         TIMESTAMP WITH TIME ZONE,
    OPT_LOCK_VERSION         INT,
    CONSTRAINT PK_HFJ_SEARCH PRIMARY KEY (PID)
);

CREATE UNIQUE INDEX IF NOT EXISTS IDX_SEARCH_UUID ON HFJ_SEARCH (SEARCH_UUID);
CREATE INDEX IF NOT EXISTS IDX_SEARCH_LASTRETURNED ON HFJ_SEARCH (SEARCH_LAST_RETURNED);
CREATE INDEX IF NOT EXISTS IDX_SEARCH_RESTYPE ON HFJ_SEARCH (RESOURCE_TYPE);
|
||||
|
||||
-- One row per matched resource per cached search; ORDER_NUM preserves
-- result ordering for paging.
CREATE TABLE IF NOT EXISTS HFJ_SEARCH_RESULT (
    PID        BIGINT NOT NULL DEFAULT nextval('hfj_search_result_seq'),
    SEARCH_PID BIGINT NOT NULL,
    RES_ID     BIGINT NOT NULL,
    ORDER_NUM  INT NOT NULL,
    CONSTRAINT PK_HFJ_SEARCH_RESULT PRIMARY KEY (PID),
    CONSTRAINT FK_SEARCHRES_SEARCH FOREIGN KEY (SEARCH_PID) REFERENCES HFJ_SEARCH (PID)
);

CREATE UNIQUE INDEX IF NOT EXISTS IDX_SEARCHRES_ORDER
    ON HFJ_SEARCH_RESULT (SEARCH_PID, ORDER_NUM);
CREATE INDEX IF NOT EXISTS IDX_SEARCHRES_RESID
    ON HFJ_SEARCH_RESULT (SEARCH_PID, RES_ID);
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- SUBSCRIPTIONS
-- Per-subscription delivery counters; one row per Subscription resource.
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS HFJ_SUBSCRIPTION_STATS (
    PID                     BIGINT NOT NULL DEFAULT nextval('hfj_subscription_seq'),
    RES_ID                  BIGINT NOT NULL,
    CREATED_TIME            TIMESTAMP WITH TIME ZONE NOT NULL,
    DELIVERY_FAILED_COUNT   INT NOT NULL DEFAULT 0,
    DELIVERY_ORPHANED_COUNT INT NOT NULL DEFAULT 0,
    DELIVERY_SUCCESS_COUNT  INT NOT NULL DEFAULT 0,
    CONSTRAINT PK_HFJ_SUBSCRIPTION_STATS PRIMARY KEY (PID),
    CONSTRAINT FK_SUBSC_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID)
);

CREATE UNIQUE INDEX IF NOT EXISTS IDX_SUBSC_RESID ON HFJ_SUBSCRIPTION_STATS (RES_ID);
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- NPM PACKAGES (IG storage)
-- HAPI writes IG package metadata here on first load. Per the original
-- note, IgPackageInitializer takes an advisory lock to avoid a race on
-- multi-replica startup.
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS NPM_PACKAGE (
    PID            BIGINT NOT NULL DEFAULT nextval('npm_package_seq'),
    PACKAGE_ID     VARCHAR(200) NOT NULL,
    CUR_VERSION_ID BIGINT,
    UPDATED_TIME   TIMESTAMP WITH TIME ZONE NOT NULL,
    CONSTRAINT PK_NPM_PACKAGE PRIMARY KEY (PID)
);

CREATE UNIQUE INDEX IF NOT EXISTS IDX_NPMPACKAGE_PKGID ON NPM_PACKAGE (PACKAGE_ID);
|
||||
|
||||
-- One row per installed version of an IG package.
CREATE TABLE IF NOT EXISTS NPM_PACKAGE_VER (
    PID             BIGINT NOT NULL DEFAULT nextval('npm_package_ver_seq'),
    PACKAGE_ID      VARCHAR(200) NOT NULL,
    VERSION_ID      VARCHAR(200) NOT NULL,
    PKG_PID         BIGINT NOT NULL,
    PACKAGE_DESC    VARCHAR(200),
    FHIR_VERSION    VARCHAR(10) NOT NULL,
    FHIR_VERSION_ID SMALLINT NOT NULL,
    CURRENT_VERSION BOOLEAN,
    UPDATED_TIME    TIMESTAMP WITH TIME ZONE NOT NULL,
    INSTALLED_SIZE  BIGINT,
    CONSTRAINT PK_NPM_PACKAGE_VER PRIMARY KEY (PID),
    CONSTRAINT FK_NPM_PKG_VER_PKG FOREIGN KEY (PKG_PID) REFERENCES NPM_PACKAGE (PID)
);

-- A package cannot be installed twice at the same version.
CREATE UNIQUE INDEX IF NOT EXISTS IDX_NPMPACKAGEVER_URL
    ON NPM_PACKAGE_VER (PACKAGE_ID, VERSION_ID);
CREATE INDEX IF NOT EXISTS IDX_NPMPACKAGEVER_PKGID
    ON NPM_PACKAGE_VER (PKG_PID);
|
||||
|
||||
-- One row per conformance resource contained in an installed IG package
-- version (StructureDefinitions, ValueSets, etc.).
CREATE TABLE IF NOT EXISTS NPM_PACKAGE_VER_RES (
    PID                BIGINT NOT NULL DEFAULT nextval('npm_package_ver_res_seq'),
    PACKVER_PID        BIGINT NOT NULL,
    RES_TYPE           VARCHAR(40) NOT NULL,
    FHIR_ID            VARCHAR(64) NOT NULL,
    FHIR_VERSION       VARCHAR(10),
    FHIR_VERSION_ID    SMALLINT,
    RES_VERSIONLESS_ID VARCHAR(200),
    FILE_DIR           VARCHAR(200),
    FILE_NAME          VARCHAR(200),
    RES_SIZE_BYTES     BIGINT,
    CANONICAL_URL      VARCHAR(200),
    CANONICAL_VERSION  VARCHAR(200),
    -- FIX: was MEDIUMTEXT, which is a MySQL-only type. PostgreSQL has no
    -- MEDIUMTEXT, so the original statement would fail at migration time.
    -- TEXT is the PostgreSQL unlimited-length string type.
    RES_TEXT           TEXT,
    CONSTRAINT PK_NPM_PKG_VER_RES PRIMARY KEY (PID),
    CONSTRAINT FK_NPM_PKG_VER_RES_PKG FOREIGN KEY (PACKVER_PID)
        REFERENCES NPM_PACKAGE_VER (PID)
);

CREATE INDEX IF NOT EXISTS IDX_PACKVERRES_PACKVER ON NPM_PACKAGE_VER_RES (PACKVER_PID);
CREATE INDEX IF NOT EXISTS IDX_PACKVERRES_CANONICAL ON NPM_PACKAGE_VER_RES (CANONICAL_URL);
CREATE INDEX IF NOT EXISTS IDX_PACKVERRES_TYPE ON NPM_PACKAGE_VER_RES (RES_TYPE, FHIR_ID);
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- TERMINOLOGY TABLES
-- ConceptMap, ValueSet expansion cache, Concept definitions.
-- Per the original note: the BD Core IG uses OCL as the terminology
-- authority; these tables back HAPI's internal terminology machinery,
-- not primary ICD-11 storage.
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS TRM_CODESYSTEM (
    PID                 BIGINT NOT NULL,
    RES_ID              BIGINT,
    CS_NAME             VARCHAR(200),
    CS_URI              VARCHAR(200),
    CS_VERSION          VARCHAR(200),
    CURRENT_VERSION_PID BIGINT,
    CODESYSTEM_PID      BIGINT,
    CONSTRAINT PK_TRM_CODESYSTEM PRIMARY KEY (PID)
);

-- NOTE(review): uniqueness is enforced on CS_NAME here; upstream HAPI keys
-- code systems by canonical URI — confirm CS_NAME (not CS_URI) is the
-- intended unique key before relying on this.
CREATE UNIQUE INDEX IF NOT EXISTS IDX_CS_NAME ON TRM_CODESYSTEM (CS_NAME);
|
||||
|
||||
-- One row per version of a code system.
CREATE TABLE IF NOT EXISTS TRM_CODESYSTEM_VER (
    PID             BIGINT NOT NULL,
    CS_PID          BIGINT NOT NULL,
    RES_ID          BIGINT,
    CS_VERSION_ID   VARCHAR(200),
    CURRENT_VERSION BOOLEAN,
    CS_DISPLAY_NAME VARCHAR(200),
    CONSTRAINT PK_TRM_CODESYSTEM_VER PRIMARY KEY (PID),
    CONSTRAINT FK_CODESYSVER_CS FOREIGN KEY (CS_PID) REFERENCES TRM_CODESYSTEM (PID)
);
|
||||
|
||||
-- One row per concept within a code-system version.
CREATE TABLE IF NOT EXISTS TRM_CONCEPT (
    PID             BIGINT NOT NULL DEFAULT nextval('hfj_concept_seq'),
    CODESYSTEM_PID  BIGINT NOT NULL,   -- references the code-system VERSION
    CODE            VARCHAR(500) NOT NULL,
    DISPLAY         VARCHAR(400),
    PARENT_PIDS     TEXT,
    CODE_SEQUENCE   INT,
    INDEX_STATUS    BIGINT,
    CONCEPT_UPDATED TIMESTAMP WITH TIME ZONE,
    HASH_CODE       BIGINT,
    CONSTRAINT PK_TRM_CONCEPT PRIMARY KEY (PID),
    CONSTRAINT FK_CONCEPT_PID_CS FOREIGN KEY (CODESYSTEM_PID)
        REFERENCES TRM_CODESYSTEM_VER (PID)
);

CREATE INDEX IF NOT EXISTS IDX_CONCEPT_CODESYSTEM ON TRM_CONCEPT (CODESYSTEM_PID);
CREATE INDEX IF NOT EXISTS IDX_CONCEPT_UPDATED ON TRM_CONCEPT (CONCEPT_UPDATED);
-- A code appears at most once per code-system version.
CREATE UNIQUE INDEX IF NOT EXISTS IDX_CONCEPT_CS_CODE
    ON TRM_CONCEPT (CODESYSTEM_PID, CODE);
|
||||
|
||||
-- ConceptMap header: one row per stored ConceptMap resource.
CREATE TABLE IF NOT EXISTS TRM_CONCEPT_MAP (
    PID             BIGINT NOT NULL DEFAULT nextval('hfj_concept_map_seq'),
    RES_ID          BIGINT,
    CONCEPT_MAP_URL VARCHAR(200),
    CM_VERSION      VARCHAR(200),
    SOURCE_VS       VARCHAR(200),
    TARGET_VS       VARCHAR(200),
    CONSTRAINT PK_TRM_CONCEPT_MAP PRIMARY KEY (PID)
);

-- ConceptMap.group: one source/target code-system pairing per row.
CREATE TABLE IF NOT EXISTS TRM_CONCEPT_MAP_GROUP (
    PID        BIGINT NOT NULL DEFAULT nextval('hfj_concept_map_grp_seq'),
    MAP_PID    BIGINT NOT NULL,
    SOURCE_CS  VARCHAR(200),
    SOURCE_VER VARCHAR(200),
    TARGET_CS  VARCHAR(200),
    TARGET_VER VARCHAR(200),
    MAP_ORDER  INT,
    CONSTRAINT PK_TRM_CONCEPT_MAP_GROUP PRIMARY KEY (PID),
    CONSTRAINT FK_CMG_MAP FOREIGN KEY (MAP_PID) REFERENCES TRM_CONCEPT_MAP (PID)
);
|
||||
|
||||
-- ConceptMap.group.element: a mapped source code within a group.
CREATE TABLE IF NOT EXISTS TRM_CONCEPT_MAP_GRP_ELEMENT (
    PID            BIGINT NOT NULL DEFAULT nextval('hfj_concept_map_grp_elm_seq'),
    GROUP_PID      BIGINT NOT NULL,
    SOURCE_CODE    VARCHAR(500) NOT NULL,
    SOURCE_DISPLAY VARCHAR(400),
    SYSTEM_VERSION VARCHAR(200),
    CONSTRAINT PK_TRM_CONCEPT_MAP_GRP_ELEMENT PRIMARY KEY (PID),
    CONSTRAINT FK_CMEL_GRP FOREIGN KEY (GROUP_PID) REFERENCES TRM_CONCEPT_MAP_GROUP (PID)
);

-- ConceptMap.group.element.target: a target code for a mapped element.
CREATE TABLE IF NOT EXISTS TRM_CONCEPT_MAP_GRP_ELM_TGT (
    PID                 BIGINT NOT NULL DEFAULT nextval('hfj_concept_map_grp_elm_tgt_seq'),
    ELEMENT_PID         BIGINT NOT NULL,
    TARGET_CODE         VARCHAR(500),
    TARGET_DISPLAY      VARCHAR(400),
    TARGET_CODE_SYS_VER VARCHAR(200),
    TARGET_EQUIVALENCE  VARCHAR(50),
    VALUESET_ORDER      INT,
    HASH_IDENTITY       BIGINT,
    CONSTRAINT PK_TRM_CONCEPT_MAP_GRP_ELM_TGT PRIMARY KEY (PID),
    CONSTRAINT FK_CMELTGT_ELM FOREIGN KEY (ELEMENT_PID)
        REFERENCES TRM_CONCEPT_MAP_GRP_ELEMENT (PID)
);
|
||||
|
||||
-- ValueSet expansion header; EXPANSION_STATUS tracks pre-expansion state.
CREATE TABLE IF NOT EXISTS TRM_VALUESET (
    PID                 BIGINT NOT NULL DEFAULT nextval('hfj_valueset_seq'),
    EXPANSION_STATUS    VARCHAR(50) NOT NULL,
    VS_NAME             VARCHAR(200),
    VS_URL              VARCHAR(200) NOT NULL,
    VS_VERSION          VARCHAR(200),
    TOTAL_CONCEPT_COUNT INT,
    EXPAN_ID            VARCHAR(200),
    UPDATED_TIMESTAMP   TIMESTAMP WITH TIME ZONE,
    HASH_IDENTITY       BIGINT,
    CONSTRAINT PK_TRM_VALUESET PRIMARY KEY (PID)
);

CREATE UNIQUE INDEX IF NOT EXISTS IDX_VS_HASH ON TRM_VALUESET (HASH_IDENTITY);
|
||||
|
||||
-- One row per concept in a pre-expanded ValueSet.
CREATE TABLE IF NOT EXISTS TRM_VALUESET_CONCEPT (
    PID                  BIGINT NOT NULL DEFAULT nextval('hfj_valueset_concept_seq'),
    VALUESET_PID         BIGINT NOT NULL,
    SYSTEM_URL           VARCHAR(200) NOT NULL,
    SYSTEM_VER           VARCHAR(200),
    CODEVAL              VARCHAR(500) NOT NULL,
    DISPLAY              VARCHAR(400),
    INDEX_STATUS         BIGINT,
    SOURCE_PID           BIGINT,
    VS_CONCEPT_UPDATED   TIMESTAMP WITH TIME ZONE,
    HASH_CODEVAL         BIGINT,
    HASH_SYS_AND_CODEVAL BIGINT,
    CONSTRAINT PK_TRM_VALUESET_CONCEPT PRIMARY KEY (PID),
    CONSTRAINT FK_TRM_VALUESET_PID FOREIGN KEY (VALUESET_PID) REFERENCES TRM_VALUESET (PID)
);

CREATE INDEX IF NOT EXISTS IDX_VSCON_VS ON TRM_VALUESET_CONCEPT (VALUESET_PID);
-- Partial unique index: dedup by code hash within a ValueSet, skipping
-- rows where the hash has not been computed.
CREATE UNIQUE INDEX IF NOT EXISTS IDX_VSCON_HASHCODEVAL
    ON TRM_VALUESET_CONCEPT (VALUESET_PID, HASH_CODEVAL)
    WHERE HASH_CODEVAL IS NOT NULL;
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- BATCH2 JOB INFRASTRUCTURE (HAPI 7.x)
-- Backs bulk export, $reindex, and terminology import operations.
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS HFJ_BATCH2_JOB_INST (
    ID                      VARCHAR(36) NOT NULL,
    CREATE_TIME             TIMESTAMP WITH TIME ZONE NOT NULL,
    START_TIME              TIMESTAMP WITH TIME ZONE,
    END_TIME                TIMESTAMP WITH TIME ZONE,
    STAT                    VARCHAR(20) NOT NULL,
    JOB_DEFN_ID             VARCHAR(100) NOT NULL,
    JOB_PARAMS              TEXT,
    CMB_RECS_PROCESSED      INT,
    CMB_RECS_PER_SEC        DOUBLE PRECISION,
    TOT_ELAPSED_MILLIS      INT,
    IS_WORK_CHUNKS_PURGED   BOOLEAN NOT NULL DEFAULT FALSE,
    WORK_CHUNKS_PURGED_TIME TIMESTAMP WITH TIME ZONE,
    ERROR_MSG               VARCHAR(500),
    ERROR_COUNT             INT NOT NULL DEFAULT 0,
    EST_REMAINING           VARCHAR(100),
    CUR_GATED_STEP_ID       VARCHAR(100),
    CANCELLED               BOOLEAN NOT NULL DEFAULT FALSE,
    REPORT                  TEXT,
    FAST_TRACKING           BOOLEAN,
    TRIGGER_TIME            TIMESTAMP WITH TIME ZONE,
    PARAMS_HASH             BIGINT,
    CONSTRAINT PK_HFJ_BATCH2_JOB_INST PRIMARY KEY (ID)
);

CREATE INDEX IF NOT EXISTS IDX_BATCH2JOBS_STAT ON HFJ_BATCH2_JOB_INST (STAT);
|
||||
|
||||
-- One work chunk per batch2 job step invocation.
CREATE TABLE IF NOT EXISTS HFJ_BATCH2_WORK_CHUNK (
    ID                VARCHAR(36) NOT NULL,
    SEQ               INT NOT NULL,
    JOB_INSTANCE_ID   VARCHAR(36) NOT NULL,
    TGT_STEP_ID       VARCHAR(100) NOT NULL,
    STAT              VARCHAR(20) NOT NULL,
    CREATE_TIME       TIMESTAMP WITH TIME ZONE NOT NULL,
    START_TIME        TIMESTAMP WITH TIME ZONE,
    END_TIME          TIMESTAMP WITH TIME ZONE,
    ERROR_MSG         VARCHAR(500),
    ERROR_COUNT       INT NOT NULL DEFAULT 0,
    RECORDS_PROCESSED INT,
    CHUNK_DATA        TEXT,
    WARNING_MSG       VARCHAR(500),
    CONSTRAINT PK_HFJ_BATCH2_WORK_CHUNK PRIMARY KEY (ID),
    CONSTRAINT FK_BATCH2WC_JOB FOREIGN KEY (JOB_INSTANCE_ID)
        REFERENCES HFJ_BATCH2_JOB_INST (ID)
);

CREATE INDEX IF NOT EXISTS IDX_BATCH2WC_JOB_STAT
    ON HFJ_BATCH2_WORK_CHUNK (JOB_INSTANCE_ID, STAT);
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- RESOURCE SEARCH URLS
-- NOTE(review): the original header labeled this "RESOURCE HISTORY TABLE",
-- but the table stores one row per conditional-operation search URL keyed
-- by the URL itself — confirm intended label with the schema author.
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS HFJ_RES_SEARCH_URL (
    RES_SEARCH_URL VARCHAR(2000) NOT NULL,
    RES_ID         BIGINT NOT NULL,
    CREATED_TIME   TIMESTAMP WITH TIME ZONE NOT NULL,
    CONSTRAINT PK_HFJ_RES_SEARCH_URL PRIMARY KEY (RES_SEARCH_URL),
    CONSTRAINT FK_RES_SEARCH_URL_RES FOREIGN KEY (RES_ID)
        REFERENCES HFJ_RESOURCE (RES_ID)
);
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- PARTITION TABLE (HAPI multi-tenancy — disabled for this deployment,
-- but the schema must exist because HAPI 7.x always references it)
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS HFJ_PARTITION (
    PART_ID     INT NOT NULL,
    PART_NAME   VARCHAR(200) NOT NULL,
    PART_DESC   VARCHAR(200),
    PART_STATUS VARCHAR(20) NOT NULL DEFAULT 'ACTIVE',
    CONSTRAINT PK_HFJ_PARTITION PRIMARY KEY (PART_ID)
);

-- Seed the always-present default partition; idempotent on re-run.
INSERT INTO HFJ_PARTITION (PART_ID, PART_NAME, PART_STATUS)
VALUES (0, 'DEFAULT', 'ACTIVE')
ON CONFLICT DO NOTHING;
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- STORED FILES TABLE (HAPI binary storage)
-- Binary content is stored inline as BYTEA, keyed by BLOB_ID.
-- ---------------------------------------------------------------------------

CREATE TABLE IF NOT EXISTS HFJ_BINARY_STORAGE_BLOB (
    BLOB_ID        VARCHAR(200) NOT NULL,
    RESOURCE_ID    VARCHAR(100) NOT NULL,
    BLOB_SIZE      INT,
    CONTENT_TYPE   VARCHAR(100) NOT NULL,
    BLOB_DATA      BYTEA NOT NULL,
    PUBLISHED_DATE TIMESTAMP WITH TIME ZONE NOT NULL,
    BLOB_HASH      VARCHAR(128),
    CONSTRAINT PK_HFJ_BINARY_STORAGE_BLOB PRIMARY KEY (BLOB_ID)
);
|
||||
|
||||
-- ---------------------------------------------------------------------------
-- COMMENTS FOR FUTURE MIGRATIONS
-- Partitioning guidance attached to the tables it concerns. Adjacent
-- string literals on consecutive lines are concatenated by PostgreSQL.
-- ---------------------------------------------------------------------------

COMMENT ON TABLE HFJ_RESOURCE IS
    'Core FHIR resource table. Partition candidate at 10M+ rows. '
    'Suggested: PARTITION BY RANGE (RES_LAST_UPDATED) monthly. '
    'Prerequisites before partitioning: convert PK to composite, '
    'update all FK references. Write V3 migration when threshold reached.';

COMMENT ON TABLE HFJ_SPIDX_TOKEN IS
    'Token search index. Highest write volume table — one row per coded '
    'element per resource. Partition candidate at 50M+ rows. '
    'Suggested: PARTITION BY HASH (RES_TYPE) with 8 partitions.';

COMMENT ON TABLE HFJ_RES_VER IS
    'Resource version content. Partition candidate at 10M+ rows. '
    'Suggested: PARTITION BY RANGE (RES_LAST_UPDATED) monthly.';
|
||||
182
hapi-overlay/src/main/resources/logback-spring.xml
Normal file
182
hapi-overlay/src/main/resources/logback-spring.xml
Normal file
@@ -0,0 +1,182 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
logback-spring.xml
|
||||
Structured JSON logging for ELK ingestion.
|
||||
Uses logstash-logback-encoder for JSON output.
|
||||
|
||||
Two appenders:
|
||||
CONSOLE_JSON — structured JSON to stdout (Docker captures this)
|
||||
FILE_JSON — structured JSON to /app/logs/ (Filebeat pickup)
|
||||
|
||||
Both appenders produce identical JSON format.
|
||||
Filebeat ships /app/logs/ to Logstash/Elasticsearch.
|
||||
Docker logs ship CONSOLE_JSON to local Docker log driver.
|
||||
|
||||
FIELD REFERENCE (all log entries contain):
|
||||
@timestamp — ISO8601 UTC
|
||||
level — TRACE/DEBUG/INFO/WARN/ERROR
|
||||
logger — logger name (class)
|
||||
thread — thread name
|
||||
message — log message
|
||||
application — bd-fhir-hapi
|
||||
environment — prod
|
||||
|
||||
ADDITIONAL FIELDS (where present):
|
||||
requestId — per-request correlation UUID (from KeycloakJwtInterceptor)
|
||||
clientId — Keycloak client_id
|
||||
sendingFacility — facility from token
|
||||
resourceType — FHIR resource type
|
||||
outcome — ACCEPTED/REJECTED
|
||||
rejectionCode — rejection code (for REJECTED events)
|
||||
durationMs — processing duration in milliseconds
|
||||
-->
|
||||
<configuration scan="false">
|
||||
|
||||
<!-- Spring Boot provides these properties via SpringBoot integration -->
|
||||
<springProperty scope="context" name="appName"
|
||||
source="spring.application.name" defaultValue="bd-fhir-hapi"/>
|
||||
<springProperty scope="context" name="activeProfile"
|
||||
source="spring.profiles.active" defaultValue="prod"/>
|
||||
|
||||
<!-- =========================================================
|
||||
JSON ENCODER — shared configuration
|
||||
========================================================= -->
|
||||
<appender name="CONSOLE_JSON"
|
||||
class="ch.qos.logback.core.ConsoleAppender">
|
||||
<encoder class="net.logstash.logback.encoder.LogstashEncoder">
|
||||
<!-- Standard fields -->
|
||||
<timestampPattern>yyyy-MM-dd'T'HH:mm:ss.SSSZZ</timestampPattern>
|
||||
<timeZone>UTC</timeZone>
|
||||
|
||||
<!-- Custom static fields on every log entry -->
|
||||
<customFields>{"application":"${appName}","environment":"${activeProfile}"}</customFields>
|
||||
|
||||
<!-- Include MDC fields — KeycloakJwtInterceptor sets these per-request -->
|
||||
<includeMdcKeyName>requestId</includeMdcKeyName>
|
||||
<includeMdcKeyName>clientId</includeMdcKeyName>
|
||||
<includeMdcKeyName>sendingFacility</includeMdcKeyName>
|
||||
<includeMdcKeyName>resourceType</includeMdcKeyName>
|
||||
<includeMdcKeyName>requestIp</includeMdcKeyName>
|
||||
|
||||
<!-- Shorten logger name for readability -->
|
||||
<shortenedLoggerNameLength>40</shortenedLoggerNameLength>
|
||||
|
||||
<!-- Do not include caller data (file/line number) — expensive -->
|
||||
<includeCallerData>false</includeCallerData>
|
||||
|
||||
<!-- Include exception as structured field, not embedded in message -->
|
||||
<throwableConverter class="net.logstash.logback.stacktrace.ShortenedThrowableConverter">
|
||||
<maxDepthPerCause>10</maxDepthPerCause>
|
||||
<maxLength>2048</maxLength>
|
||||
<rootCauseFirst>true</rootCauseFirst>
|
||||
</throwableConverter>
|
||||
</encoder>
|
||||
|
||||
<!-- Do not log below WARN from noisy HAPI internals even if
|
||||
root level is set lower. Override per-logger in application.yaml. -->
|
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<level>INFO</level>
|
||||
</filter>
|
||||
</appender>
|
||||
|
||||
<appender name="FILE_JSON"
|
||||
class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>/app/logs/bd-fhir-hapi.log</file>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<!-- Daily rotation + size cap -->
|
||||
<fileNamePattern>/app/logs/bd-fhir-hapi.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
|
||||
<maxFileSize>100MB</maxFileSize>
|
||||
<maxHistory>7</maxHistory>
|
||||
<totalSizeCap>1GB</totalSizeCap>
|
||||
</rollingPolicy>
|
||||
<encoder class="net.logstash.logback.encoder.LogstashEncoder">
|
||||
<timestampPattern>yyyy-MM-dd'T'HH:mm:ss.SSSZZ</timestampPattern>
|
||||
<timeZone>UTC</timeZone>
|
||||
<customFields>{"application":"${appName}","environment":"${activeProfile}"}</customFields>
|
||||
<includeMdcKeyName>requestId</includeMdcKeyName>
|
||||
<includeMdcKeyName>clientId</includeMdcKeyName>
|
||||
<includeMdcKeyName>sendingFacility</includeMdcKeyName>
|
||||
<includeMdcKeyName>resourceType</includeMdcKeyName>
|
||||
<includeMdcKeyName>requestIp</includeMdcKeyName>
|
||||
<shortenedLoggerNameLength>40</shortenedLoggerNameLength>
|
||||
<includeCallerData>false</includeCallerData>
|
||||
<throwableConverter class="net.logstash.logback.stacktrace.ShortenedThrowableConverter">
|
||||
<maxDepthPerCause>10</maxDepthPerCause>
|
||||
<maxLength>2048</maxLength>
|
||||
<rootCauseFirst>true</rootCauseFirst>
|
||||
</throwableConverter>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<!-- Async wrappers — log appends must not block FHIR request threads -->
|
||||
<appender name="ASYNC_CONSOLE"
|
||||
class="ch.qos.logback.classic.AsyncAppender">
|
||||
<appender-ref ref="CONSOLE_JSON"/>
|
||||
<!-- Queue capacity: 1000 log events before blocking. If the logging
|
||||
pipeline is slower than log production, this queue absorbs bursts. -->
|
||||
<queueSize>1000</queueSize>
|
||||
<!-- discardingThreshold=0: never discard log events even when queue
|
||||
is 80% full. Default is to discard INFO/DEBUG at 80% — unacceptable
|
||||
for audit-adjacent logs. -->
|
||||
<discardingThreshold>0</discardingThreshold>
|
||||
<includeCallerData>false</includeCallerData>
|
||||
</appender>
|
||||
|
||||
<appender name="ASYNC_FILE"
|
||||
class="ch.qos.logback.classic.AsyncAppender">
|
||||
<appender-ref ref="FILE_JSON"/>
|
||||
<queueSize>1000</queueSize>
|
||||
<discardingThreshold>0</discardingThreshold>
|
||||
<includeCallerData>false</includeCallerData>
|
||||
</appender>
|
||||
|
||||
<!-- =========================================================
|
||||
LOGGER CONFIGURATION
|
||||
Levels here are defaults — overridden by application.yaml
|
||||
logging.level.* properties which Logback reads at startup.
|
||||
========================================================= -->
|
||||
|
||||
<!-- Our application code — INFO and above -->
|
||||
<logger name="bd.gov.dghs" level="INFO" additivity="false">
|
||||
<appender-ref ref="ASYNC_CONSOLE"/>
|
||||
<appender-ref ref="ASYNC_FILE"/>
|
||||
</logger>
|
||||
|
||||
<!-- HAPI internals — WARN only. INFO from HAPI is extremely verbose
|
||||
and contains partial resource content. -->
|
||||
<logger name="ca.uhn.hapi.fhir" level="WARN" additivity="false">
|
||||
<appender-ref ref="ASYNC_CONSOLE"/>
|
||||
<appender-ref ref="ASYNC_FILE"/>
|
||||
</logger>
|
||||
|
||||
<!-- Flyway — INFO to capture migration progress at startup -->
|
||||
<logger name="org.flywaydb" level="INFO" additivity="false">
|
||||
<appender-ref ref="ASYNC_CONSOLE"/>
|
||||
<appender-ref ref="ASYNC_FILE"/>
|
||||
</logger>
|
||||
|
||||
<!-- Hibernate SQL — WARN. Never DEBUG in production. -->
|
||||
<logger name="org.hibernate.SQL" level="WARN" additivity="false">
|
||||
<appender-ref ref="ASYNC_CONSOLE"/>
|
||||
<appender-ref ref="ASYNC_FILE"/>
|
||||
</logger>
|
||||
|
||||
<!-- HikariCP pool events — WARN -->
|
||||
<logger name="com.zaxxer.hikari" level="WARN" additivity="false">
|
||||
<appender-ref ref="ASYNC_CONSOLE"/>
|
||||
<appender-ref ref="ASYNC_FILE"/>
|
||||
</logger>
|
||||
|
||||
<!-- Spring framework — WARN -->
|
||||
<logger name="org.springframework" level="WARN" additivity="false">
|
||||
<appender-ref ref="ASYNC_CONSOLE"/>
|
||||
<appender-ref ref="ASYNC_FILE"/>
|
||||
</logger>
|
||||
|
||||
<!-- Root logger — WARN for everything else -->
|
||||
<root level="WARN">
|
||||
<appender-ref ref="ASYNC_CONSOLE"/>
|
||||
<appender-ref ref="ASYNC_FILE"/>
|
||||
</root>
|
||||
|
||||
</configuration>
|
||||
0
hapi-overlay/src/main/resources/packages/.gitkeep
Normal file
0
hapi-overlay/src/main/resources/packages/.gitkeep
Normal file
262
ops/adding-additional-igs.md
Normal file
262
ops/adding-additional-igs.md
Normal file
@@ -0,0 +1,262 @@
|
||||
# Adding Additional Implementation Guides
|
||||
|
||||
**Audience:** DGHS FHIR development and operations team
|
||||
**Applies to:** Any IG added after BD Core FHIR IG v0.2.1
|
||||
**Current IGs:** BD Core (`https://fhir.dghs.gov.bd/core`)
|
||||
**Planned IGs:** MCCoD (`https://fhir.dghs.gov.bd/mccod`), IMCI (`https://fhir.dghs.gov.bd/imci`)
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
Each DGHS Implementation Guide has its own canonical URL namespace:
|
||||
|
||||
| IG | Canonical base | Package naming convention |
|
||||
|----|---------------|--------------------------|
|
||||
| BD Core | `https://fhir.dghs.gov.bd/core` | `bd.gov.dghs.core-{version}.tgz` |
|
||||
| MCCoD | `https://fhir.dghs.gov.bd/mccod` | `bd.gov.dghs.mccod-{version}.tgz` |
|
||||
| IMCI | `https://fhir.dghs.gov.bd/imci` | `bd.gov.dghs.imci-{version}.tgz` |
|
||||
|
||||
Separate canonical namespaces mean profiles from different IGs never collide regardless of resource type overlap. A `Composition` profiled in MCCoD at `https://fhir.dghs.gov.bd/mccod/StructureDefinition/mccod-composition` and a `Composition` profiled in a future Core IG extension are completely independent. HAPI validates a resource against whichever profile URL it declares in `meta.profile`.
|
||||
|
||||
All packages are loaded into a single `NpmPackageValidationSupport` instance. HAPI merges them into one validation context at startup. There is no performance penalty for multiple IGs — profiles are loaded once into memory and reused across all validation calls.
|
||||
|
||||
---
|
||||
|
||||
## What changes when adding a new IG
|
||||
|
||||
### 1. `packages/` directory
|
||||
|
||||
Place the new IG `.tgz` alongside the existing core IG package:
|
||||
|
||||
```
|
||||
hapi-overlay/src/main/resources/packages/
|
||||
├── bd.gov.dghs.core-0.2.1.tgz ← existing
|
||||
├── bd.gov.dghs.mccod-1.0.0.tgz ← new
|
||||
└── bd.gov.dghs.imci-1.0.0.tgz ← new
|
||||
```
|
||||
|
||||
Only one version of each IG per image. If you are upgrading an existing IG, remove the old `.tgz` and place the new one.
|
||||
|
||||
### 2. `FhirServerConfig.java` — load the new package
|
||||
|
||||
Find the `npmPackageValidationSupport()` bean and add a `loadPackageFromClasspath()` call for each new IG:
|
||||
|
||||
```java
|
||||
@Bean
|
||||
public NpmPackageValidationSupport npmPackageValidationSupport(FhirContext fhirContext) {
|
||||
NpmPackageValidationSupport support = new NpmPackageValidationSupport(fhirContext);
|
||||
|
||||
// BD Core IG — always present
|
||||
support.loadPackageFromClasspath(
|
||||
"classpath:packages/bd.gov.dghs.core-0.2.1.tgz");
|
||||
|
||||
// MCCoD IG — add when deploying
|
||||
support.loadPackageFromClasspath(
|
||||
"classpath:packages/bd.gov.dghs.mccod-1.0.0.tgz");
|
||||
|
||||
// IMCI IG — add when deploying
|
||||
support.loadPackageFromClasspath(
|
||||
"classpath:packages/bd.gov.dghs.imci-1.0.0.tgz");
|
||||
|
||||
return support;
|
||||
}
|
||||
```
|
||||
|
||||
### 3. `FhirServerConfig.java` — register new resource types
|
||||
|
||||
The `BD_CORE_PROFILE_RESOURCE_TYPES` set determines which resource types receive full profile validation versus the `unvalidated-profile` tag. Add every resource type that any of your IGs profiles:
|
||||
|
||||
```java
|
||||
private static final Set<String> BD_CORE_PROFILE_RESOURCE_TYPES = Set.of(
|
||||
|
||||
// BD Core IG
|
||||
"Patient", "Condition", "Encounter", "Observation",
|
||||
"Practitioner", "Organization", "Location",
|
||||
"Medication", "MedicationRequest", "Immunization",
|
||||
|
||||
// MCCoD IG — add the resource types your MCCoD IG profiles
|
||||
"Composition", "MedicationStatement",
|
||||
|
||||
// IMCI IG — add the resource types your IMCI IG profiles
|
||||
"QuestionnaireResponse", "ClinicalImpression"
|
||||
);
|
||||
```
|
||||
|
||||
If a resource type appears in multiple IGs (e.g., `Composition` in both MCCoD and a future Core extension), add it once. HAPI validates against whichever profile URL the submitted resource declares — it does not matter that multiple profiles for that type are loaded.
|
||||
|
||||
### 4. `IgPackageInitializer.java` — load metadata for each new package
|
||||
|
||||
The initialiser currently loads one package under an advisory lock. Extend it to load each package. The advisory lock pattern remains the same — one lock per package, identified by package ID:
|
||||
|
||||
```java
|
||||
@Override
|
||||
public void afterPropertiesSet() throws Exception {
|
||||
loadIgPackage(
|
||||
"classpath:packages/bd.gov.dghs.core-0.2.1.tgz",
|
||||
"bd.gov.dghs.core", "0.2.1");
|
||||
|
||||
loadIgPackage(
|
||||
"classpath:packages/bd.gov.dghs.mccod-1.0.0.tgz",
|
||||
"bd.gov.dghs.mccod", "1.0.0");
|
||||
|
||||
loadIgPackage(
|
||||
"classpath:packages/bd.gov.dghs.imci-1.0.0.tgz",
|
||||
"bd.gov.dghs.imci", "1.0.0");
|
||||
}
|
||||
|
||||
private void loadIgPackage(
|
||||
String classpathPath,
|
||||
String packageId,
|
||||
String version) throws Exception {
|
||||
|
||||
long lockKey = deriveLockKey(packageId);
|
||||
// ... same advisory lock acquisition logic as current implementation
|
||||
// ... same performIgLoad() call
|
||||
// Each package gets its own independent advisory lock key
|
||||
// so packages load concurrently across replicas without blocking each other
|
||||
}
|
||||
```
|
||||
|
||||
### 5. `application.yaml` — add new IG configuration entries
|
||||
|
||||
Under the `bd.fhir.ig` section, add entries for the new packages. This makes IG paths configurable without recompiling:
|
||||
|
||||
```yaml
|
||||
bd:
|
||||
fhir:
|
||||
ig:
|
||||
packages:
|
||||
- classpath: classpath:packages/bd.gov.dghs.core-0.2.1.tgz
|
||||
id: bd.gov.dghs.core
|
||||
version: 0.2.1
|
||||
- classpath: classpath:packages/bd.gov.dghs.mccod-1.0.0.tgz
|
||||
id: bd.gov.dghs.mccod
|
||||
version: 1.0.0
|
||||
- classpath: classpath:packages/bd.gov.dghs.imci-1.0.0.tgz
|
||||
id: bd.gov.dghs.imci
|
||||
version: 1.0.0
|
||||
```
|
||||
|
||||
Update `FhirServerConfig.java` to read this list and loop over it rather than having hardcoded paths. This means adding a new IG in future requires only a config change and new `.tgz` — no Java code change.
|
||||
|
||||
### 6. `.env` — add new IG version variables
|
||||
|
||||
Add version tracking variables for operational visibility and for the `/actuator/info` endpoint:
|
||||
|
||||
```bash
|
||||
# BD Core IG
|
||||
HAPI_IG_CORE_VERSION=0.2.1
|
||||
|
||||
# MCCoD IG
|
||||
HAPI_IG_MCCOD_VERSION=1.0.0
|
||||
|
||||
# IMCI IG
|
||||
HAPI_IG_IMCI_VERSION=1.0.0
|
||||
```
|
||||
|
||||
### 7. Gitea workflow — add new IG package secrets
|
||||
|
||||
For each new IG, add a Gitea secret and decode it in the build step:
|
||||
|
||||
**Gitea → Repository → Settings → Secrets — add:**
|
||||
|
||||
| Secret | Value |
|
||||
|--------|-------|
|
||||
| `MCCOD_PACKAGE_B64` | `base64 -w 0 bd.gov.dghs.mccod-1.0.0.tgz` |
|
||||
| `IMCI_PACKAGE_B64` | `base64 -w 0 bd.gov.dghs.imci-1.0.0.tgz` |
|
||||
|
||||
**Gitea → Repository → Settings → Variables — add:**
|
||||
|
||||
| Variable | Value |
|
||||
|----------|-------|
|
||||
| `MCCOD_PACKAGE_FILENAME` | `bd.gov.dghs.mccod-1.0.0.tgz` |
|
||||
| `IMCI_PACKAGE_FILENAME` | `bd.gov.dghs.imci-1.0.0.tgz` |
|
||||
|
||||
**In `.gitea/workflows/build.yml` — extend the IG placement step:**
|
||||
|
||||
```yaml
|
||||
- name: Place IG packages for build
|
||||
run: |
|
||||
echo "${{ secrets.IG_PACKAGE_B64 }}" | base64 -d > \
|
||||
hapi-overlay/src/main/resources/packages/${{ vars.IG_PACKAGE_FILENAME }}
|
||||
|
||||
echo "${{ secrets.MCCOD_PACKAGE_B64 }}" | base64 -d > \
|
||||
hapi-overlay/src/main/resources/packages/${{ vars.MCCOD_PACKAGE_FILENAME }}
|
||||
|
||||
echo "${{ secrets.IMCI_PACKAGE_B64 }}" | base64 -d > \
|
||||
hapi-overlay/src/main/resources/packages/${{ vars.IMCI_PACKAGE_FILENAME }}
|
||||
|
||||
echo "Packages placed:"
|
||||
ls -lh hapi-overlay/src/main/resources/packages/
|
||||
```
|
||||
|
||||
**And extend the cleanup step:**
|
||||
|
||||
```yaml
|
||||
- name: Clean up IG packages from workspace
|
||||
if: always()
|
||||
run: rm -f hapi-overlay/src/main/resources/packages/*.tgz
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## What does not change
|
||||
|
||||
| Component | Reason |
|
||||
|-----------|--------|
|
||||
| Validation chain order | `NpmPackageValidationSupport` handles all loaded IGs transparently |
|
||||
| OCL integration | `BdTerminologyValidationSupport` intercepts only `http://id.who.int/icd/release/11/mms` — other systems in any IG route through normally |
|
||||
| Cluster expression validator | ICD-11 specific — unaffected by other IGs |
|
||||
| Keycloak auth | No change — all vendors use `mci-api` role regardless of which IG they submit against |
|
||||
| Audit tables | Schema is resource-type agnostic — new resource types are captured automatically |
|
||||
| PostgreSQL schema | No migration needed — HAPI JPA stores all FHIR R4 resource types in the same tables |
|
||||
| pgBouncer, nginx proxy config | Infrastructure is IG-agnostic |
|
||||
|
||||
---
|
||||
|
||||
## Terminology considerations for new IGs
|
||||
|
||||
If MCCoD or IMCI IGs introduce coded elements using systems **other than ICD-11** that are already in your OCL instance (e.g., LOINC, drug ValueSets), no additional configuration is needed. `BdTerminologyValidationSupport` only handles ICD-11. All other systems fall through to HAPI's standard remote terminology mechanism which already calls OCL.
|
||||
|
||||
If a new IG introduces a **new terminology system** not currently in OCL:
|
||||
|
||||
1. Import the new system into OCL first.
|
||||
2. Verify OCL `$validate-code` works for the new system: `curl "https://tr.ocl.dghs.gov.bd/api/fhir/CodeSystem/$validate-code?system={new-system-url}&code={test-code}"`
|
||||
3. No HAPI code changes needed — HAPI's remote terminology support handles any system OCL knows about.
|
||||
|
||||
If a new IG introduces a terminology system that will **never be in OCL** (e.g., a purely local ValueSet defined within the IG itself), HAPI will validate it using `InMemoryTerminologyServerValidationSupport` from the concepts loaded with the IG package. No external call is made.
|
||||
|
||||
---
|
||||
|
||||
## Upgrade procedure for an existing specialised IG
|
||||
|
||||
When MCCoD advances from v1.0.0 to v1.1.0:
|
||||
|
||||
1. Place `bd.gov.dghs.mccod-1.1.0.tgz` in `packages/`, remove `bd.gov.dghs.mccod-1.0.0.tgz`.
|
||||
2. Update the package path in `FhirServerConfig.java` (or in `application.yaml` if you implemented the config-driven approach from Step 5 above).
|
||||
3. Update `MCCOD_PACKAGE_FILENAME` Gitea variable to `bd.gov.dghs.mccod-1.1.0.tgz`.
|
||||
4. Update `MCCOD_PACKAGE_B64` Gitea secret with the new package base64.
|
||||
5. Tag and push — CI builds and pushes the new image.
|
||||
6. Deploy the new image on the production server.
|
||||
|
||||
If the IG upgrade changes terminology ValueSets in OCL (new codes, reclassified codes), follow the cache flush procedure in `ops/version-upgrade-integration.md` after deployment.
|
||||
|
||||
---
|
||||
|
||||
## Deployment checklist for a new IG
|
||||
|
||||
- [ ] New IG `.tgz` placed in `packages/`, filename follows naming convention
|
||||
- [ ] `FhirServerConfig.java` — `npmPackageValidationSupport()` loads new package
|
||||
- [ ] `FhirServerConfig.java` — `BD_CORE_PROFILE_RESOURCE_TYPES` updated with new resource types
|
||||
- [ ] `IgPackageInitializer.java` — new package included in initialisation loop
|
||||
- [ ] `application.yaml` — new IG entry added under `bd.fhir.ig.packages`
|
||||
- [ ] `.env` — new IG version variable added
|
||||
- [ ] Gitea secrets — new `*_PACKAGE_B64` secret created
|
||||
- [ ] Gitea variables — new `*_PACKAGE_FILENAME` variable created
|
||||
- [ ] Gitea workflow — new package decode and cleanup steps added
|
||||
- [ ] New image built, pushed, deployed
|
||||
- [ ] Acceptance test: submit a resource claiming the new IG profile → 201 accepted
|
||||
- [ ] Acceptance test: submit a resource violating the new IG profile → 422 rejected
|
||||
- [ ] Acceptance test: existing Core IG submissions still work → 201 accepted
|
||||
- [ ] Vendors notified of new IG availability and profile URLs
|
||||
894
ops/deployment-guide.md
Normal file
894
ops/deployment-guide.md
Normal file
@@ -0,0 +1,894 @@
|
||||
# BD FHIR National — Production Deployment Guide
|
||||
|
||||
**Target OS:** Ubuntu 22.04 LTS
|
||||
**Audience:** DGHS infrastructure team
|
||||
**Estimated time:** 90 minutes first deployment, 15 minutes subsequent upgrades
|
||||
|
||||
---
|
||||
|
||||
## Prerequisites checklist
|
||||
|
||||
Before starting, confirm all of the following:
|
||||
|
||||
- [ ] Ubuntu 22.04 LTS server provisioned with minimum 8GB RAM, 4 vCPU, 100GB disk
|
||||
- [ ] Server has outbound HTTPS access to:
|
||||
- `auth.dghs.gov.bd` (Keycloak)
|
||||
- `tr.ocl.dghs.gov.bd` (OCL)
|
||||
- `icd11.dghs.gov.bd` (cluster validator)
|
||||
- Your private Docker registry
|
||||
- [ ] TLS certificates provisioned at paths matching `.env` `TLS_CERT_PATH` / `TLS_KEY_PATH`
|
||||
- [ ] Keycloak `hris` realm configured per `ops/keycloak-setup.md`
|
||||
- [ ] BD Core IG `bd.gov.dghs.core-0.2.1.tgz` present in `hapi-overlay/src/main/resources/packages/` on CI machine
|
||||
- [ ] CI machine has built and pushed the Docker image to private registry
|
||||
- [ ] `.env` file prepared from `.env.example` with all secrets filled in
|
||||
|
||||
---
|
||||
|
||||
## Part 1 — Server preparation
|
||||
|
||||
### 1.1 — Install Docker Engine
|
||||
|
||||
```bash
|
||||
# Remove any conflicting packages
|
||||
for pkg in docker.io docker-doc docker-compose docker-compose-v2 \
|
||||
podman-docker containerd runc; do
|
||||
sudo apt-get remove -y $pkg 2>/dev/null
|
||||
done
|
||||
|
||||
# Add Docker's official GPG key
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y ca-certificates curl
|
||||
sudo install -m 0755 -d /etc/apt/keyrings
|
||||
sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg \
|
||||
-o /etc/apt/keyrings/docker.asc
|
||||
sudo chmod a+r /etc/apt/keyrings/docker.asc
|
||||
|
||||
# Add Docker repository
|
||||
echo \
|
||||
"deb [arch=$(dpkg --print-architecture) \
|
||||
signed-by=/etc/apt/keyrings/docker.asc] \
|
||||
https://download.docker.com/linux/ubuntu \
|
||||
$(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
|
||||
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
|
||||
# Install Docker Engine and Compose plugin
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y docker-ce docker-ce-cli containerd.io \
|
||||
docker-buildx-plugin docker-compose-plugin
|
||||
|
||||
# Verify
|
||||
docker --version # Docker Engine 25.x or higher
|
||||
docker compose version # Docker Compose v2.x or higher
|
||||
```
|
||||
|
||||
### 1.2 — Configure Docker daemon
|
||||
|
||||
```bash
|
||||
# Create daemon config: limit log size, set storage driver
|
||||
sudo tee /etc/docker/daemon.json <<'EOF'
|
||||
{
|
||||
"log-driver": "json-file",
|
||||
"log-opts": {
|
||||
"max-size": "100m",
|
||||
"max-file": "5"
|
||||
},
|
||||
"storage-driver": "overlay2",
|
||||
"live-restore": true
|
||||
}
|
||||
EOF
|
||||
|
||||
sudo systemctl restart docker
|
||||
sudo systemctl enable docker
|
||||
|
||||
# Add your deploy user to the docker group (avoids sudo on every docker command)
|
||||
sudo usermod -aG docker $USER
|
||||
# Log out and back in for group membership to take effect
|
||||
```
|
||||
|
||||
### 1.3 — Create application directory
|
||||
|
||||
```bash
|
||||
sudo mkdir -p /opt/bd-fhir-national
|
||||
sudo chown $USER:$USER /opt/bd-fhir-national
|
||||
cd /opt/bd-fhir-national
|
||||
```
|
||||
|
||||
### 1.4 — Deploy project files
|
||||
|
||||
Copy the entire project directory to the server. Recommended approach:
|
||||
|
||||
```bash
|
||||
# From your CI/deployment machine:
|
||||
rsync -avz --exclude='.git' \
|
||||
--exclude='hapi-overlay/target' \
|
||||
--exclude='hapi-overlay/src' \
|
||||
./bd-fhir-national/ \
|
||||
deploy@your-server:/opt/bd-fhir-national/
|
||||
|
||||
# The server needs:
|
||||
# /opt/bd-fhir-national/
|
||||
# ├── docker-compose.yml
|
||||
# ├── .env ← you create this (see 1.5)
|
||||
# ├── nginx/nginx.conf
|
||||
# ├── postgres/fhir/postgresql.conf
|
||||
# ├── postgres/fhir/init.sql
|
||||
# ├── postgres/audit/postgresql.conf
|
||||
# └── postgres/audit/init.sql
|
||||
#
|
||||
# The hapi-overlay/ source tree does NOT need to be on the production server.
|
||||
# Only the Docker image (pre-built and pushed to registry) is needed.
|
||||
```
|
||||
|
||||
### 1.5 — Create .env file
|
||||
|
||||
```bash
|
||||
cd /opt/bd-fhir-national
|
||||
cp .env.example .env
|
||||
chmod 600 .env # restrict to owner only — contains secrets
|
||||
|
||||
# Edit .env with actual values
|
||||
nano .env
|
||||
```
|
||||
|
||||
**Required values in .env:**
|
||||
|
||||
```bash
|
||||
# Docker image — must match what CI pushed
|
||||
HAPI_IMAGE=your-registry.dghs.gov.bd/bd-fhir-hapi:1.0.0
|
||||
|
||||
# FHIR database
|
||||
FHIR_DB_NAME=fhirdb
|
||||
FHIR_DB_SUPERUSER=postgres
|
||||
FHIR_DB_SUPERUSER_PASSWORD=$(openssl rand -base64 32)
|
||||
FHIR_DB_APP_USER=hapi_app
|
||||
FHIR_DB_APP_PASSWORD=$(openssl rand -base64 32)
|
||||
|
||||
# Audit database
|
||||
AUDIT_DB_NAME=auditdb
|
||||
AUDIT_DB_SUPERUSER=postgres
|
||||
AUDIT_DB_SUPERUSER_PASSWORD=$(openssl rand -base64 32)
|
||||
AUDIT_DB_WRITER_USER=audit_writer_login
|
||||
AUDIT_DB_WRITER_PASSWORD=$(openssl rand -base64 32)
|
||||
AUDIT_DB_MAINTAINER_USER=audit_maintainer_login
|
||||
AUDIT_DB_MAINTAINER_PASSWORD=$(openssl rand -base64 32)
|
||||
|
||||
# TLS certificate paths (absolute paths on this server)
|
||||
TLS_CERT_PATH=/etc/ssl/dghs/fhir.dghs.gov.bd.crt
|
||||
TLS_KEY_PATH=/etc/ssl/dghs/fhir.dghs.gov.bd.key
|
||||
```
|
||||
|
||||
> **Security:** Never commit `.env` to version control. Store the filled
|
||||
> `.env` in your secrets vault (HashiCorp Vault, AWS SSM, or encrypted backup).
|
||||
> Verify permissions after creation: `ls -la .env` should show `-rw-------`.
|
||||
|
||||
### 1.6 — Fix PostgreSQL init script password injection
|
||||
|
||||
The `postgres/audit/init.sql` file contains placeholder passwords.
|
||||
PostgreSQL's Docker entrypoint does not perform variable substitution in
|
||||
`.sql` init files — only `.sh` files. Replace the init SQL with a shell script:
|
||||
|
||||
```bash
|
||||
# Create shell-based init script for audit database
|
||||
cat > /opt/bd-fhir-national/postgres/audit/init.sh <<'INITSCRIPT'
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# Load passwords from environment variables
|
||||
# (These env vars are set in docker-compose.yml from .env)
|
||||
WRITER_USER="${AUDIT_DB_WRITER_USER:-audit_writer_login}"
|
||||
WRITER_PASS="${AUDIT_DB_WRITER_PASSWORD}"
|
||||
MAINTAINER_USER="${AUDIT_DB_MAINTAINER_USER:-audit_maintainer_login}"
|
||||
MAINTAINER_PASS="${AUDIT_DB_MAINTAINER_PASSWORD}"
|
||||
|
||||
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
|
||||
-- Create writer login user
|
||||
DO \$\$
|
||||
BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = '${WRITER_USER}') THEN
|
||||
CREATE USER ${WRITER_USER}
|
||||
WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT LOGIN
|
||||
CONNECTION LIMIT 20
|
||||
PASSWORD '${WRITER_PASS}';
|
||||
END IF;
|
||||
END
|
||||
\$\$;
|
||||
|
||||
-- Create maintainer login user
|
||||
DO \$\$
|
||||
BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = '${MAINTAINER_USER}') THEN
|
||||
CREATE USER ${MAINTAINER_USER}
|
||||
WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT LOGIN
|
||||
CONNECTION LIMIT 5
|
||||
PASSWORD '${MAINTAINER_PASS}';
|
||||
END IF;
|
||||
END
|
||||
\$\$;
|
||||
|
||||
GRANT CONNECT ON DATABASE ${POSTGRES_DB} TO ${WRITER_USER};
|
||||
GRANT CONNECT ON DATABASE ${POSTGRES_DB} TO ${MAINTAINER_USER};
|
||||
EOSQL
|
||||
INITSCRIPT
|
||||
|
||||
chmod +x /opt/bd-fhir-national/postgres/audit/init.sh
|
||||
```
|
||||
|
||||
Update `docker-compose.yml` to mount `init.sh` instead of `init.sql` for
|
||||
the `postgres-audit` service:
|
||||
|
||||
```yaml
|
||||
# In postgres-audit volumes: section, change:
|
||||
# - ./postgres/audit/init.sql:/docker-entrypoint-initdb.d/init.sql:ro
|
||||
# To:
|
||||
- ./postgres/audit/init.sh:/docker-entrypoint-initdb.d/init.sh:ro
|
||||
```
|
||||
|
||||
Also pass the audit user environment variables to `postgres-audit`:
|
||||
|
||||
```yaml
|
||||
# In postgres-audit environment: section, add:
|
||||
AUDIT_DB_WRITER_USER: ${AUDIT_DB_WRITER_USER}
|
||||
AUDIT_DB_WRITER_PASSWORD: ${AUDIT_DB_WRITER_PASSWORD}
|
||||
AUDIT_DB_MAINTAINER_USER: ${AUDIT_DB_MAINTAINER_USER}
|
||||
AUDIT_DB_MAINTAINER_PASSWORD: ${AUDIT_DB_MAINTAINER_PASSWORD}
|
||||
```
|
||||
|
||||
Similarly for `postgres-fhir`, create `init.sh`:
|
||||
|
||||
```bash
|
||||
cat > /opt/bd-fhir-national/postgres/fhir/init.sh <<'INITSCRIPT'
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
APP_USER="${FHIR_DB_APP_USER:-hapi_app}"
|
||||
APP_PASS="${FHIR_DB_APP_PASSWORD}"
|
||||
|
||||
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
|
||||
DO \$\$
|
||||
BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = '${APP_USER}') THEN
|
||||
CREATE USER ${APP_USER}
|
||||
WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT LOGIN
|
||||
CONNECTION LIMIT 30
|
||||
PASSWORD '${APP_PASS}';
|
||||
END IF;
|
||||
END
|
||||
\$\$;
|
||||
|
||||
GRANT CONNECT ON DATABASE ${POSTGRES_DB} TO ${APP_USER};
|
||||
GRANT USAGE ON SCHEMA public TO ${APP_USER};
|
||||
ALTER DEFAULT PRIVILEGES IN SCHEMA public
|
||||
GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO ${APP_USER};
|
||||
ALTER DEFAULT PRIVILEGES IN SCHEMA public
|
||||
GRANT USAGE, SELECT ON SEQUENCES TO ${APP_USER};
|
||||
EOSQL
|
||||
INITSCRIPT
|
||||
|
||||
chmod +x /opt/bd-fhir-national/postgres/fhir/init.sh
|
||||
```
|
||||
|
||||
### 1.7 — Authenticate with private Docker registry
|
||||
|
||||
```bash
|
||||
# Log in to your private registry
|
||||
docker login your-registry.dghs.gov.bd \
|
||||
--username ${REGISTRY_USER} \
|
||||
--password-stdin <<< "${REGISTRY_PASSWORD}"
|
||||
|
||||
# Verify the login persisted
|
||||
cat ~/.docker/config.json | jq '.auths | keys'
|
||||
# Should include "your-registry.dghs.gov.bd"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Part 2 — First deployment
|
||||
|
||||
### 2.1 — Pull images
|
||||
|
||||
```bash
|
||||
cd /opt/bd-fhir-national
|
||||
|
||||
# Pull all images declared in docker-compose.yml
|
||||
docker compose --env-file .env pull
|
||||
|
||||
# Verify images are present locally
|
||||
docker images | grep -E "hapi|postgres|pgbouncer|nginx"
|
||||
```
|
||||
|
||||
### 2.2 — Start infrastructure services first
|
||||
|
||||
Start databases before HAPI. HAPI's `depends_on` with `condition: service_healthy`
|
||||
handles this automatically, but starting manually in stages helps isolate
|
||||
any first-run issues.
|
||||
|
||||
```bash
|
||||
# Start databases
|
||||
docker compose --env-file .env up -d postgres-fhir postgres-audit
|
||||
|
||||
# Wait for health checks to pass (up to 60 seconds)
|
||||
echo "Waiting for PostgreSQL to be ready..."
|
||||
until docker compose --env-file .env ps postgres-fhir \
|
||||
| grep -q "healthy"; do
|
||||
sleep 3
|
||||
echo -n "."
|
||||
done
|
||||
echo ""
|
||||
echo "postgres-fhir: healthy"
|
||||
|
||||
until docker compose --env-file .env ps postgres-audit \
|
||||
| grep -q "healthy"; do
|
||||
sleep 3
|
||||
echo -n "."
|
||||
done
|
||||
echo ""
|
||||
echo "postgres-audit: healthy"
|
||||
```
|
||||
|
||||
### 2.3 — Verify PostgreSQL user creation
|
||||
|
||||
```bash
|
||||
# Verify FHIR app user was created
|
||||
docker exec bd-postgres-fhir psql -U postgres -d fhirdb -c \
|
||||
"SELECT rolname, rolcanlogin FROM pg_roles WHERE rolname = 'hapi_app';"
|
||||
# Expected: hapi_app | t
|
||||
|
||||
# Verify audit writer user was created
|
||||
docker exec bd-postgres-audit psql -U postgres -d auditdb -c \
|
||||
"SELECT rolname, rolcanlogin FROM pg_roles WHERE rolname = 'audit_writer_login';"
|
||||
# Expected: audit_writer_login | t
|
||||
```
|
||||
|
||||
### 2.4 — Start pgBouncer
|
||||
|
||||
```bash
|
||||
docker compose --env-file .env up -d pgbouncer-fhir pgbouncer-audit
|
||||
|
||||
# Verify pgBouncer is healthy
|
||||
until docker compose --env-file .env ps pgbouncer-fhir \
|
||||
| grep -q "healthy"; do
|
||||
sleep 3
|
||||
done
|
||||
echo "pgbouncer-fhir: healthy"
|
||||
```
|
||||
|
||||
### 2.5 — Start HAPI (first replica)
|
||||
|
||||
```bash
|
||||
docker compose --env-file .env up -d hapi
|
||||
|
||||
# Follow startup logs — this takes 60-120 seconds on first run
|
||||
# Watch for these key log events in order:
|
||||
# 1. "Running FHIR Flyway migrations" — V1 schema creation
|
||||
# 2. "Running Audit Flyway migrations" — V2 audit schema creation
|
||||
# 3. "Advisory lock acquired" — IG package initialisation begins
|
||||
# 4. "BD Core IG package loaded successfully" — IG loaded
|
||||
# 5. "BdTerminologyValidationSupport initialised" — OCL integration ready
|
||||
# 6. "KeycloakJwtInterceptor initialised" — JWT validation ready
|
||||
# 7. "HAPI RestfulServer interceptors registered" — server ready
|
||||
# 8. Spring Boot startup completion message with port 8080
|
||||
|
||||
docker compose --env-file .env logs -f hapi
|
||||
# Press Ctrl+C when you see the startup completion message
|
||||
```
|
||||
|
||||
**Expected startup log sequence (key lines only):**
|
||||
```
|
||||
INFO o.f.core.internal.command.DbMigrate - Running FHIR Flyway migrations
|
||||
INFO o.f.core.internal.command.DbMigrate - Successfully applied 1 migration to schema "public"
|
||||
INFO o.f.core.internal.command.DbMigrate - Running Audit Flyway migrations
|
||||
INFO o.f.core.internal.command.DbMigrate - Successfully applied 1 migration to schema "audit"
|
||||
INFO b.g.d.f.init.IgPackageInitializer - Advisory lock acquired: lockKey=... waitedMs=...
|
||||
INFO b.g.d.f.init.IgPackageInitializer - BD Core IG package loaded successfully: version=0.2.1
|
||||
INFO b.g.d.f.t.BdTerminologyValidationSupport - BdTerminologyValidationSupport initialised
|
||||
INFO b.g.d.f.i.KeycloakJwtInterceptor - KeycloakJwtInterceptor initialised
|
||||
INFO b.g.d.f.c.SecurityConfig - HAPI RestfulServer interceptors registered
|
||||
INFO o.s.b.w.e.t.TomcatWebServer - Tomcat started on port(s): 8080
|
||||
INFO b.g.d.f.BdFhirApplication - Started BdFhirApplication in XX.XXX seconds
|
||||
```
|
||||
|
||||
### 2.6 — Start nginx
|
||||
|
||||
```bash
|
||||
docker compose --env-file .env up -d nginx
|
||||
|
||||
# Verify nginx started without config errors
|
||||
docker compose --env-file .env logs nginx | tail -20
|
||||
# Should NOT contain: [emerg] or [crit] — only [notice] lines
|
||||
|
||||
# Verify nginx health
|
||||
docker compose --env-file .env ps nginx
|
||||
# Status should be: Up (healthy)
|
||||
```
|
||||
|
||||
### 2.7 — Verify full stack health
|
||||
|
||||
```bash
|
||||
# Internal health check (bypasses nginx, hits HAPI directly)
|
||||
docker exec $(docker compose --env-file .env ps -q hapi | head -1) \
|
||||
curl -s http://localhost:8080/actuator/health | jq .
|
||||
|
||||
# Expected output:
|
||||
# {
|
||||
# "status": "UP",
|
||||
# "components": {
|
||||
# "db": { "status": "UP" },
|
||||
# "auditDb": { "status": "UP" },
|
||||
# "ocl": { "status": "UP" },
|
||||
# "livenessState": { "status": "UP" },
|
||||
# "readinessState": { "status": "UP" }
|
||||
# }
|
||||
# }
|
||||
|
||||
# External health check (through nginx + TLS)
|
||||
curl -s https://fhir.dghs.gov.bd/actuator/health/liveness | jq .
|
||||
# Expected: { "status": "UP" }
|
||||
|
||||
# FHIR metadata endpoint (unauthenticated)
|
||||
curl -s https://fhir.dghs.gov.bd/fhir/metadata | jq '{
|
||||
resourceType,
|
||||
fhirVersion,
|
||||
software: .software,
|
||||
implementation: .implementation
|
||||
}'
|
||||
# Expected:
|
||||
# {
|
||||
# "resourceType": "CapabilityStatement",
|
||||
# "fhirVersion": "4.0.1",
|
||||
# "software": { "name": "BD FHIR National Repository", "version": "0.2.1" }
|
||||
# }
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Part 3 — Phase 2 acceptance tests
|
||||
|
||||
Run all seven tests before declaring the deployment production-ready.
|
||||
Each test includes the expected HTTP status, expected response body shape,
|
||||
and what to check in the audit log if the test fails.
|
||||
|
||||
### Setup: obtain a vendor test token
|
||||
|
||||
```bash
|
||||
VENDOR_TOKEN=$(curl -s -X POST \
|
||||
"https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/token" \
|
||||
-d "grant_type=client_credentials" \
|
||||
-d "client_id=fhir-vendor-TEST-FAC-001" \
|
||||
-d "client_secret=${TEST_VENDOR_SECRET}" \
|
||||
| jq -r '.access_token')
|
||||
|
||||
echo "Token obtained: ${VENDOR_TOKEN:0:20}..."
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Test 1 — Valid Condition with valid ICD-11 code → 201
|
||||
|
||||
Submits a BD Core IG-compliant `bd-condition` resource with a valid
|
||||
ICD-11 Diagnosis-class code.
|
||||
|
||||
```bash
|
||||
curl -s -w "\n--- HTTP %{http_code} ---\n" \
|
||||
-X POST https://fhir.dghs.gov.bd/fhir/Condition \
|
||||
-H "Authorization: Bearer ${VENDOR_TOKEN}" \
|
||||
-H "Content-Type: application/fhir+json" \
|
||||
-d '{
|
||||
"resourceType": "Condition",
|
||||
"meta": {
|
||||
"profile": ["https://fhir.dghs.gov.bd/core/StructureDefinition/bd-condition"]
|
||||
},
|
||||
"clinicalStatus": {
|
||||
"coding": [{
|
||||
"system": "http://terminology.hl7.org/CodeSystem/condition-clinical",
|
||||
"code": "active"
|
||||
}]
|
||||
},
|
||||
"verificationStatus": {
|
||||
"coding": [{
|
||||
"system": "http://terminology.hl7.org/CodeSystem/condition-ver-status",
|
||||
"code": "confirmed"
|
||||
}]
|
||||
},
|
||||
"code": {
|
||||
"coding": [{
|
||||
"system": "http://id.who.int/icd/release/11/mms",
|
||||
"code": "1C62.0",
|
||||
"display": "Typhoid fever"
|
||||
}]
|
||||
},
|
||||
"subject": {
|
||||
"reference": "Patient/test-patient-001"
|
||||
},
|
||||
"recordedDate": "2025-03-01"
|
||||
}'
|
||||
```
|
||||
|
||||
**Expected:** `HTTP 201 Created` with `Location` header containing the new resource URL.
|
||||
|
||||
**If 422 instead:**
|
||||
- Check OCL connectivity: `curl 'https://tr.ocl.dghs.gov.bd/api/fhir/CodeSystem/$validate-code?system=http://id.who.int/icd/release/11/mms&code=1C62.0'` (single quotes are required — otherwise the shell expands `$validate` and treats `&` as a background operator)
|
||||
- Check IG is loaded: `curl http://localhost:8080/actuator/health` — OCL component should be UP
|
||||
- Check HAPI logs for profile validation errors
|
||||
|
||||
---
|
||||
|
||||
### Test 2 — Invalid ICD-11 code → 422
|
||||
|
||||
```bash
|
||||
curl -s -w "\n--- HTTP %{http_code} ---\n" \
|
||||
-X POST https://fhir.dghs.gov.bd/fhir/Condition \
|
||||
-H "Authorization: Bearer ${VENDOR_TOKEN}" \
|
||||
-H "Content-Type: application/fhir+json" \
|
||||
-d '{
|
||||
"resourceType": "Condition",
|
||||
"meta": {
|
||||
"profile": ["https://fhir.dghs.gov.bd/core/StructureDefinition/bd-condition"]
|
||||
},
|
||||
"clinicalStatus": {
|
||||
"coding": [{"system": "http://terminology.hl7.org/CodeSystem/condition-clinical", "code": "active"}]
|
||||
},
|
||||
"verificationStatus": {
|
||||
"coding": [{"system": "http://terminology.hl7.org/CodeSystem/condition-ver-status", "code": "confirmed"}]
|
||||
},
|
||||
"code": {
|
||||
"coding": [{
|
||||
"system": "http://id.who.int/icd/release/11/mms",
|
||||
"code": "INVALID-CODE-99999",
|
||||
"display": "This code does not exist"
|
||||
}]
|
||||
},
|
||||
"subject": {"reference": "Patient/test-patient-001"},
|
||||
"recordedDate": "2025-03-01"
|
||||
}'
|
||||
```
|
||||
|
||||
**Expected:** `HTTP 422 Unprocessable Entity` with OperationOutcome containing:
|
||||
- `issue[0].severity`: `error`
|
||||
- `issue[0].diagnostics`: contains "INVALID-CODE-99999" and rejection reason
|
||||
- `issue[0].expression`: contains `Condition.code`
|
||||
|
||||
**Verify in audit table:**
|
||||
```sql
|
||||
SELECT rejection_code, rejection_reason, invalid_code, element_path
|
||||
FROM audit.fhir_rejected_submissions
|
||||
ORDER BY submission_time DESC LIMIT 1;
|
||||
-- Expected: TERMINOLOGY_INVALID_CODE | OCL rejected code... | INVALID-CODE-99999 | Condition.code...
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Test 3 — Device-class ICD-11 code in Condition.code → 422
|
||||
|
||||
Device-class codes are valid ICD-11 codes but are not in the
|
||||
`bd-condition-icd11-diagnosis-valueset` (restricted to Diagnosis + Finding).
|
||||
|
||||
```bash
|
||||
# XA7RE2 is an example Device-class code in ICD-11 MMS
|
||||
# Verify it is Device-class in OCL before running this test:
|
||||
# curl 'https://tr.ocl.dghs.gov.bd/api/fhir/CodeSystem/$lookup?system=http://id.who.int/icd/release/11/mms&code=XA7RE2'
|
||||
|
||||
curl -s -w "\n--- HTTP %{http_code} ---\n" \
|
||||
-X POST https://fhir.dghs.gov.bd/fhir/Condition \
|
||||
-H "Authorization: Bearer ${VENDOR_TOKEN}" \
|
||||
-H "Content-Type: application/fhir+json" \
|
||||
-d '{
|
||||
"resourceType": "Condition",
|
||||
"meta": {
|
||||
"profile": ["https://fhir.dghs.gov.bd/core/StructureDefinition/bd-condition"]
|
||||
},
|
||||
"clinicalStatus": {
|
||||
"coding": [{"system": "http://terminology.hl7.org/CodeSystem/condition-clinical", "code": "active"}]
|
||||
},
|
||||
"verificationStatus": {
|
||||
"coding": [{"system": "http://terminology.hl7.org/CodeSystem/condition-ver-status", "code": "confirmed"}]
|
||||
},
|
||||
"code": {
|
||||
"coding": [{
|
||||
"system": "http://id.who.int/icd/release/11/mms",
|
||||
"code": "XA7RE2",
|
||||
"display": "Device code — should be rejected"
|
||||
}]
|
||||
},
|
||||
"subject": {"reference": "Patient/test-patient-001"},
|
||||
"recordedDate": "2025-03-01"
|
||||
}'
|
||||
```
|
||||
|
||||
**Expected:** `HTTP 422` with OperationOutcome.
|
||||
Rejection code in audit: `TERMINOLOGY_INVALID_CLASS`.
|
||||
|
||||
**If 201 instead (code accepted):**
|
||||
- OCL ValueSet class restriction is not enforcing correctly
|
||||
- Verify the ValueSet collection in OCL has correct concept_class filter
|
||||
- Run: `python version_upgrade.py --verify-class-restriction`
|
||||
|
||||
---
|
||||
|
||||
### Test 4 — Profile violation (missing required field) → 422
|
||||
|
||||
Submits a Condition missing `clinicalStatus` which is required by `bd-condition` profile.
|
||||
|
||||
```bash
|
||||
curl -s -w "\n--- HTTP %{http_code} ---\n" \
|
||||
-X POST https://fhir.dghs.gov.bd/fhir/Condition \
|
||||
-H "Authorization: Bearer ${VENDOR_TOKEN}" \
|
||||
-H "Content-Type: application/fhir+json" \
|
||||
-d '{
|
||||
"resourceType": "Condition",
|
||||
"meta": {
|
||||
"profile": ["https://fhir.dghs.gov.bd/core/StructureDefinition/bd-condition"]
|
||||
},
|
||||
"code": {
|
||||
"coding": [{
|
||||
"system": "http://id.who.int/icd/release/11/mms",
|
||||
"code": "1C62.0"
|
||||
}]
|
||||
},
|
||||
"subject": {"reference": "Patient/test-patient-001"}
|
||||
}'
|
||||
```
|
||||
|
||||
**Expected:** `HTTP 422` with OperationOutcome referencing missing `clinicalStatus`.
|
||||
|
||||
**If 201 instead:**
|
||||
- BD Core IG is not loaded or profile is not enforcing `clinicalStatus` as required
|
||||
- Check startup logs for IG load success
|
||||
- Verify: `curl http://localhost:8080/fhir/StructureDefinition/bd-condition`
|
||||
|
||||
---
|
||||
|
||||
### Test 5 — No Bearer token → 401
|
||||
|
||||
```bash
|
||||
curl -s -w "\n--- HTTP %{http_code} ---\n" \
|
||||
-X POST https://fhir.dghs.gov.bd/fhir/Condition \
|
||||
-H "Content-Type: application/fhir+json" \
|
||||
-d '{"resourceType": "Condition"}'
|
||||
```
|
||||
|
||||
**Expected:** `HTTP 401` with `WWW-Authenticate` header and OperationOutcome.
|
||||
|
||||
```bash
|
||||
# Verify WWW-Authenticate header is present
|
||||
curl -s -I \
|
||||
-X POST https://fhir.dghs.gov.bd/fhir/Condition \
|
||||
-H "Content-Type: application/fhir+json" \
|
||||
-d '{"resourceType":"Condition"}' \
|
||||
| grep -i "www-authenticate"
|
||||
# Expected: WWW-Authenticate: Bearer realm="BD FHIR National Repository"...
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Test 6 — Valid token but missing mci-api role → 401
|
||||
|
||||
Create a test client WITHOUT `mci-api` role in Keycloak for this test.
|
||||
Or use a token from a different realm.
|
||||
|
||||
```bash
|
||||
# Token from a client without mci-api role
|
||||
NO_ROLE_TOKEN=$(curl -s -X POST \
|
||||
"https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/token" \
|
||||
-d "grant_type=client_credentials" \
|
||||
-d "client_id=fhir-test-no-role" \
|
||||
-d "client_secret=${TEST_NO_ROLE_SECRET}" \
|
||||
| jq -r '.access_token')
|
||||
|
||||
curl -s -w "\n--- HTTP %{http_code} ---\n" \
|
||||
-X POST https://fhir.dghs.gov.bd/fhir/Condition \
|
||||
-H "Authorization: Bearer ${NO_ROLE_TOKEN}" \
|
||||
-H "Content-Type: application/fhir+json" \
|
||||
-d '{"resourceType": "Condition"}'
|
||||
```
|
||||
|
||||
**Expected:** `HTTP 401`.
|
||||
|
||||
**Verify in audit log:**
|
||||
```sql
|
||||
SELECT event_type, outcome_detail, client_id
|
||||
FROM audit.audit_events
|
||||
WHERE event_type = 'AUTH_FAILURE'
|
||||
ORDER BY event_time DESC LIMIT 1;
|
||||
-- Expected: AUTH_FAILURE | Required role 'mci-api' not present... | fhir-test-no-role
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Test 7 — Expired token → 401
|
||||
|
||||
```bash
|
||||
# An expired token is one whose 'exp' claim is in the past.
|
||||
# Easiest approach: obtain a token, wait for it to expire (default: 5 minutes),
|
||||
# then use it.
|
||||
#
|
||||
# For automated testing, forge an expired token manually:
|
||||
# (This requires knowing the signing key — use only in test environments)
|
||||
#
|
||||
# Alternative: Use a token from a deactivated Keycloak client
|
||||
# (revoke the client's credentials, existing tokens become invalid)
|
||||
|
||||
# Or simply wait:
|
||||
echo "Waiting 6 minutes for token to expire..."
|
||||
EXPIRED_TOKEN=$(curl -s -X POST \
|
||||
"https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/token" \
|
||||
-d "grant_type=client_credentials" \
|
||||
-d "client_id=fhir-vendor-TEST-FAC-001" \
|
||||
-d "client_secret=${TEST_VENDOR_SECRET}" \
|
||||
| jq -r '.access_token')
|
||||
|
||||
sleep 360  # wait 6 minutes so the default 5-minute Keycloak token lifetime has fully elapsed
|
||||
|
||||
curl -s -w "\n--- HTTP %{http_code} ---\n" \
|
||||
-X POST https://fhir.dghs.gov.bd/fhir/Condition \
|
||||
-H "Authorization: Bearer ${EXPIRED_TOKEN}" \
|
||||
-H "Content-Type: application/fhir+json" \
|
||||
-d '{"resourceType": "Condition"}'
|
||||
```
|
||||
|
||||
**Expected:** `HTTP 401` — "Token has expired".
|
||||
|
||||
---
|
||||
|
||||
### Test 8 — Cluster expression: raw postcoordinated code without extension → 422
|
||||
|
||||
```bash
|
||||
curl -s -w "\n--- HTTP %{http_code} ---\n" \
|
||||
-X POST https://fhir.dghs.gov.bd/fhir/Condition \
|
||||
-H "Authorization: Bearer ${VENDOR_TOKEN}" \
|
||||
-H "Content-Type: application/fhir+json" \
|
||||
-d '{
|
||||
"resourceType": "Condition",
|
||||
"meta": {
|
||||
"profile": ["https://fhir.dghs.gov.bd/core/StructureDefinition/bd-condition"]
|
||||
},
|
||||
"clinicalStatus": {
|
||||
"coding": [{"system": "http://terminology.hl7.org/CodeSystem/condition-clinical", "code": "active"}]
|
||||
},
|
||||
"verificationStatus": {
|
||||
"coding": [{"system": "http://terminology.hl7.org/CodeSystem/condition-ver-status", "code": "confirmed"}]
|
||||
},
|
||||
"code": {
|
||||
"coding": [{
|
||||
"system": "http://id.who.int/icd/release/11/mms",
|
||||
"code": "1C62.0&has_severity=mild",
|
||||
"display": "Raw postcoordinated string — prohibited"
|
||||
}]
|
||||
},
|
||||
"subject": {"reference": "Patient/test-patient-001"},
|
||||
"recordedDate": "2025-03-01"
|
||||
}'
|
||||
```
|
||||
|
||||
**Expected:** `HTTP 422` with OperationOutcome diagnosing:
|
||||
`"ICD-11 postcoordinated expression in Condition.code.coding[0] must use the icd11-cluster-expression extension"`
|
||||
|
||||
Rejection code in audit: `CLUSTER_STEM_MISSING_EXTENSION`.
|
||||
|
||||
---
|
||||
|
||||
### Test 9 — Cache flush endpoint requires fhir-admin role
|
||||
|
||||
```bash
|
||||
# Attempt with vendor token (mci-api only) — should be 403
|
||||
curl -s -w "\n--- HTTP %{http_code} ---\n" \
|
||||
-X DELETE https://fhir.dghs.gov.bd/admin/terminology/cache \
|
||||
-H "Authorization: Bearer ${VENDOR_TOKEN}"
|
||||
# Expected: 403 (blocked by nginx IP restriction OR TerminologyCacheManager role check)
|
||||
|
||||
# Attempt with fhir-admin token — should be 200
|
||||
ADMIN_TOKEN=$(curl -s -X POST \
|
||||
"https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/token" \
|
||||
-d "grant_type=client_credentials" \
|
||||
-d "client_id=fhir-admin-pipeline" \
|
||||
-d "client_secret=${FHIR_ADMIN_CLIENT_SECRET}" \
|
||||
| jq -r '.access_token')
|
||||
|
||||
# Note: /admin/ is restricted to 172.20.0.0/16 in nginx.
|
||||
# Run this from within the Docker network or from the server itself:
|
||||
docker exec $(docker compose --env-file .env ps -q hapi | head -1) \
|
||||
curl -s -X DELETE \
|
||||
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
|
||||
http://localhost:8080/admin/terminology/cache | jq .
|
||||
# Expected: 200 with { "status": "flushed", "entriesEvicted": N, ... }
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Part 4 — Subsequent deployments (image upgrade)
|
||||
|
||||
When a new Docker image is built and pushed (new IG version, code changes):
|
||||
|
||||
```bash
|
||||
cd /opt/bd-fhir-national
|
||||
|
||||
# 1. Update image tag in .env
|
||||
nano .env
|
||||
# Change: HAPI_IMAGE=your-registry.dghs.gov.bd/bd-fhir-hapi:1.0.0
|
||||
# To: HAPI_IMAGE=your-registry.dghs.gov.bd/bd-fhir-hapi:1.1.0
|
||||
|
||||
# 2. Pull new image
|
||||
docker compose --env-file .env pull hapi
|
||||
|
||||
# 3. Rolling restart — replaces containers one at a time
|
||||
# At 1 replica (pilot): brief downtime expected (~30s)
|
||||
docker compose --env-file .env up -d --no-deps hapi
|
||||
|
||||
# At 3 replicas (Phase 2): true rolling update — scale up then scale down
|
||||
docker compose --env-file .env up -d --no-deps --scale hapi=4 hapi
|
||||
# Wait for new replica to be healthy:
|
||||
sleep 30
|
||||
docker compose --env-file .env up -d --no-deps --scale hapi=3 hapi
|
||||
|
||||
# 4. Verify startup
|
||||
docker compose --env-file .env logs --tail=50 hapi
|
||||
|
||||
# 5. Run acceptance tests (at minimum Tests 1, 2, 5)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Part 5 — Operational runbook
|
||||
|
||||
### View logs
|
||||
|
||||
```bash
|
||||
# All services
|
||||
docker compose --env-file .env logs -f
|
||||
|
||||
# HAPI only (structured JSON — pipe through jq)
|
||||
docker compose --env-file .env logs -f hapi | jq -R 'try fromjson'
|
||||
|
||||
# nginx access log
|
||||
docker compose --env-file .env logs -f nginx
|
||||
|
||||
# Filter for rejected submissions in HAPI logs
|
||||
docker compose --env-file .env logs hapi | \
|
||||
jq -R 'try fromjson | select(.message | contains("rejected"))'
|
||||
```
|
||||
|
||||
### Restart a specific service
|
||||
|
||||
```bash
|
||||
docker compose --env-file .env restart hapi
|
||||
docker compose --env-file .env restart nginx
|
||||
```
|
||||
|
||||
### Emergency: full stack restart
|
||||
|
||||
```bash
|
||||
docker compose --env-file .env down
|
||||
docker compose --env-file .env up -d
|
||||
```
|
||||
|
||||
### Query rejected submissions
|
||||
|
||||
```bash
|
||||
docker exec bd-postgres-audit psql -U postgres -d auditdb -c "
|
||||
SELECT
|
||||
submission_time,
|
||||
resource_type,
|
||||
rejection_code,
|
||||
LEFT(rejection_reason, 100) as reason,
|
||||
client_id
|
||||
FROM audit.fhir_rejected_submissions
|
||||
ORDER BY submission_time DESC
|
||||
LIMIT 20;"
|
||||
```
|
||||
|
||||
### Check pgBouncer pool status
|
||||
|
||||
```bash
|
||||
# Connect to pgBouncer admin interface
|
||||
docker exec -it bd-pgbouncer-fhir \
|
||||
psql -h localhost -p 5432 -U pgbouncer pgbouncer -c "SHOW POOLS;"
|
||||
```
|
||||
|
||||
### Monitor disk usage
|
||||
|
||||
```bash
|
||||
# PostgreSQL data volumes
|
||||
docker system df -v | grep -E "postgres|audit"
|
||||
|
||||
# Log volume
|
||||
docker system df -v | grep hapi-logs
|
||||
```
|
||||
303
ops/keycloak-setup.md
Normal file
303
ops/keycloak-setup.md
Normal file
@@ -0,0 +1,303 @@
|
||||
# Keycloak Setup — BD FHIR National
|
||||
|
||||
**Realm:** `hris`
|
||||
**Keycloak URL:** `https://auth.dghs.gov.bd`
|
||||
**Audience:** DGHS Identity and Access Management team
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
This document covers the Keycloak configuration required for BD FHIR National
|
||||
deployment. It assumes the `hris` realm and `mci-api` role already exist
|
||||
(pre-existing national HRIS configuration). Only the additions for FHIR
|
||||
deployment are documented here.
|
||||
|
||||
---
|
||||
|
||||
## Part 1 — Create `fhir-admin` realm role
|
||||
|
||||
The `fhir-admin` role grants access to:
|
||||
- `DELETE /admin/terminology/cache` — terminology cache flush
|
||||
- `GET /admin/terminology/cache/stats` — cache statistics
|
||||
|
||||
This role is **not** assigned to vendor clients. It is assigned only to the
|
||||
ICD-11 version upgrade pipeline service account and DGHS system administrators.
|
||||
|
||||
### Steps (Keycloak Admin Console)
|
||||
|
||||
1. Log in to `https://auth.dghs.gov.bd/admin/master/console`
|
||||
2. Select realm: **hris**
|
||||
3. Navigate to: **Realm roles** → **Create role**
|
||||
4. Fill in:
|
||||
- **Role name:** `fhir-admin`
|
||||
- **Description:** `BD FHIR server administrative operations — cache management and system configuration`
|
||||
5. Click **Save**
|
||||
|
||||
### Steps (Keycloak Admin REST API — for automation)
|
||||
|
||||
```bash
|
||||
# Get admin token
|
||||
ADMIN_TOKEN=$(curl -s -X POST \
|
||||
"https://auth.dghs.gov.bd/realms/master/protocol/openid-connect/token" \
|
||||
-d "grant_type=password" \
|
||||
-d "client_id=admin-cli" \
|
||||
-d "username=${KEYCLOAK_ADMIN_USER}" \
|
||||
-d "password=${KEYCLOAK_ADMIN_PASSWORD}" \
|
||||
| jq -r '.access_token')
|
||||
|
||||
# Create fhir-admin role
|
||||
curl -s -X POST \
|
||||
"https://auth.dghs.gov.bd/admin/realms/hris/roles" \
|
||||
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "fhir-admin",
|
||||
"description": "BD FHIR server administrative operations"
|
||||
}'
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Part 2 — Create `fhir-admin` service account client
|
||||
|
||||
The version upgrade pipeline authenticates with a dedicated client.
|
||||
This client must never be shared with vendor systems.
|
||||
|
||||
### Steps (Admin Console)
|
||||
|
||||
1. Navigate to: **Clients** → **Create client**
|
||||
2. **Client type:** OpenID Connect
|
||||
3. **Client ID:** `fhir-admin-pipeline`
|
||||
4. Click **Next**
|
||||
5. **Client authentication:** ON (confidential client)
|
||||
6. **Service accounts roles:** ON
|
||||
7. **Standard flow:** OFF (machine-to-machine only)
|
||||
8. Click **Save**
|
||||
|
||||
### Assign fhir-admin role to service account
|
||||
|
||||
1. Navigate to: **Clients** → `fhir-admin-pipeline` → **Service accounts roles**
|
||||
2. Click **Assign role**
|
||||
3. Filter by: **Filter by realm roles**
|
||||
4. Select: `fhir-admin`
|
||||
5. Click **Assign**
|
||||
|
||||
### Retrieve client secret
|
||||
|
||||
1. Navigate to: **Clients** → `fhir-admin-pipeline` → **Credentials**
|
||||
2. Copy **Client secret** — store in your secrets vault
|
||||
3. This secret is used in `ops/version-upgrade-integration.md`
|
||||
|
||||
---
|
||||
|
||||
## Part 3 — Configure vendor clients
|
||||
|
||||
Each vendor organisation requires one Keycloak client. This section documents
|
||||
the **template** for creating a vendor client. Repeat for each vendor.
|
||||
|
||||
### Naming convention
|
||||
|
||||
```
|
||||
fhir-vendor-{organisation-id}
|
||||
```
|
||||
|
||||
Where `{organisation-id}` is the DGHS facility code, e.g.:
|
||||
- `fhir-vendor-DGHS-FAC-001` for Dhaka Medical College Hospital
|
||||
- `fhir-vendor-DGHS-FAC-002` for Square Hospital
|
||||
|
||||
### Steps (Admin Console)
|
||||
|
||||
1. Navigate to: **Clients** → **Create client**
|
||||
2. **Client type:** OpenID Connect
|
||||
3. **Client ID:** `fhir-vendor-{organisation-id}`
|
||||
4. Click **Next**
|
||||
5. **Client authentication:** ON
|
||||
6. **Service accounts roles:** ON
|
||||
7. **Standard flow:** OFF
|
||||
8. Click **Save**
|
||||
|
||||
### Assign mci-api role
|
||||
|
||||
1. Navigate to: **Clients** → `fhir-vendor-{org-id}` → **Service accounts roles**
|
||||
2. Click **Assign role**
|
||||
3. Select: `mci-api`
|
||||
4. Click **Assign**
|
||||
|
||||
### Add sending_facility user attribute
|
||||
|
||||
The `sending_facility` claim is a custom token mapper that injects the vendor's
|
||||
DGHS facility code into every token issued to this client. The
|
||||
`KeycloakJwtInterceptor` reads this claim for audit logging.
|
||||
|
||||
**Without this mapper, audit logs will show `client_id` as the facility
|
||||
identifier instead of the DGHS facility code. This degrades audit quality
|
||||
and generates WARN logs in HAPI on every submission.**
|
||||
|
||||
#### Create user attribute on service account
|
||||
|
||||
1. Navigate to: **Clients** → `fhir-vendor-{org-id}` → **Service accounts**
|
||||
2. Click the service account user link (e.g., `service-account-fhir-vendor-xxx`)
|
||||
3. Navigate to: **Attributes** tab
|
||||
4. Click **Add attribute**
|
||||
5. Key: `sending_facility`
|
||||
6. Value: `{DGHS facility code}` (e.g., `DGHS-FAC-001`)
|
||||
7. Click **Save**
|
||||
|
||||
#### Create token mapper
|
||||
|
||||
1. Navigate to: **Clients** → `fhir-vendor-{org-id}` → **Client scopes**
|
||||
2. Click the dedicated scope link (e.g., `fhir-vendor-xxx-dedicated`)
|
||||
3. Navigate to: **Mappers** → **Add mapper** → **By configuration**
|
||||
4. Select: **User Attribute**
|
||||
5. Fill in:
|
||||
- **Name:** `sending-facility-mapper`
|
||||
- **User Attribute:** `sending_facility`
|
||||
- **Token Claim Name:** `sending_facility`
|
||||
- **Claim JSON Type:** String
|
||||
- **Add to access token:** ON
|
||||
- **Add to ID token:** OFF
|
||||
- **Add to userinfo:** OFF
|
||||
6. Click **Save**
|
||||
|
||||
#### Verify token contains sending_facility
|
||||
|
||||
```bash
|
||||
# Get vendor token
|
||||
TOKEN=$(curl -s -X POST \
|
||||
"https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/token" \
|
||||
-d "grant_type=client_credentials" \
|
||||
-d "client_id=fhir-vendor-{org-id}" \
|
||||
-d "client_secret={secret}" \
|
||||
| jq -r '.access_token')
|
||||
|
||||
# Decode and check claims (base64 decode middle segment)
|
||||
# JWT segments are base64url-encoded without padding; translate the URL-safe
# alphabet back to standard base64 and ignore the missing-padding warning
echo "${TOKEN}" | cut -d. -f2 | tr '_-' '/+' | base64 -d 2>/dev/null | jq '{
|
||||
iss,
|
||||
sub,
|
||||
azp,
|
||||
exp,
|
||||
sending_facility,
|
||||
realm_access: .realm_access.roles
|
||||
}'
|
||||
|
||||
# Expected output:
|
||||
# {
|
||||
# "iss": "https://auth.dghs.gov.bd/realms/hris",
|
||||
# "sub": "...",
|
||||
# "azp": "fhir-vendor-{org-id}",
|
||||
# "exp": ...,
|
||||
# "sending_facility": "DGHS-FAC-001",
|
||||
# "realm_access": ["mci-api", "offline_access"]
|
||||
# }
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Part 4 — Token validation verification
|
||||
|
||||
After creating a client, verify the full token validation chain works
|
||||
before onboarding the vendor.
|
||||
|
||||
### Test 1 — Valid token accepted
|
||||
|
||||
```bash
|
||||
TOKEN=$(curl -s -X POST \
|
||||
"https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/token" \
|
||||
-d "grant_type=client_credentials" \
|
||||
-d "client_id=fhir-vendor-{org-id}" \
|
||||
-d "client_secret={secret}" \
|
||||
| jq -r '.access_token')
|
||||
|
||||
curl -s -o /dev/null -w "%{http_code}" \
|
||||
-H "Authorization: Bearer ${TOKEN}" \
|
||||
https://fhir.dghs.gov.bd/fhir/Patient
|
||||
|
||||
# Expected: 200 (empty bundle) or 404 — NOT 401
|
||||
```
|
||||
|
||||
### Test 2 — Missing token rejected
|
||||
|
||||
```bash
|
||||
curl -s -o /dev/null -w "%{http_code}" \
|
||||
https://fhir.dghs.gov.bd/fhir/Patient
|
||||
|
||||
# Expected: 401
|
||||
```
|
||||
|
||||
### Test 3 — Expired token rejected
|
||||
|
||||
```bash
|
||||
# Use a deliberately expired token (exp in the past)
|
||||
# Easiest: wait for a token to expire (default Keycloak token lifetime: 5 minutes)
|
||||
# Then attempt a request with the expired token.
|
||||
|
||||
# Expected: 401
|
||||
```
|
||||
|
||||
### Test 4 — Wrong realm rejected
|
||||
|
||||
```bash
|
||||
# Get a token from a different realm (if available) or forge iss claim
|
||||
# Expected: 401
|
||||
```
|
||||
|
||||
### Test 5 — mci-api role required
|
||||
|
||||
```bash
|
||||
# Create a test client WITHOUT mci-api role
|
||||
# Get token for that client
|
||||
# Attempt FHIR request
|
||||
# Expected: 401
|
||||
```
|
||||
|
||||
### Test 6 — fhir-admin endpoint requires fhir-admin role
|
||||
|
||||
```bash
|
||||
# Use a vendor token (mci-api only, no fhir-admin)
|
||||
VENDOR_TOKEN=...
|
||||
|
||||
curl -s -w "\n%{http_code}" \
|
||||
-X DELETE \
|
||||
-H "Authorization: Bearer ${VENDOR_TOKEN}" \
|
||||
https://fhir.dghs.gov.bd/admin/terminology/cache
|
||||
|
||||
# Expected: 403
|
||||
|
||||
# Use fhir-admin token
|
||||
ADMIN_TOKEN=$(curl -s -X POST \
|
||||
"https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/token" \
|
||||
-d "grant_type=client_credentials" \
|
||||
-d "client_id=fhir-admin-pipeline" \
|
||||
-d "client_secret={admin_secret}" \
|
||||
| jq -r '.access_token')
|
||||
|
||||
curl -s -w "\n%{http_code}" \
|
||||
-X DELETE \
|
||||
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
|
||||
https://fhir.dghs.gov.bd/admin/terminology/cache
|
||||
|
||||
# Expected: 200 with flush summary JSON
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Part 5 — Token lifetime configuration
|
||||
|
||||
Keycloak default access token lifetime is 5 minutes. For machine-to-machine
|
||||
FHIR submissions, this is appropriate — vendor systems must refresh tokens
|
||||
before expiry. Do not increase the token lifetime to accommodate vendors who
|
||||
are not refreshing tokens correctly. Token refresh is the vendor's
|
||||
responsibility, not a server-side workaround.
|
||||
|
||||
**Recommended settings for vendor clients:**
|
||||
|
||||
| Setting | Value | Rationale |
|
||||
|---------|-------|-----------|
|
||||
| Access Token Lifespan | 5 minutes | Short-lived — minimises window for token replay |
|
||||
| Refresh Token Max Reuse | 0 | One-time use refresh tokens |
|
||||
| Client Session Idle | 30 minutes | Vendor batch jobs may pause between submissions |
|
||||
| Client Session Max | 8 hours | Maximum session for a single batch run |
|
||||
|
||||
Configure at: **Realm Settings** → **Tokens** for defaults,
|
||||
or per-client at: **Clients** → `{client}` → **Advanced** → **Advanced settings**.
|
||||
271
ops/project-manifest.md
Normal file
271
ops/project-manifest.md
Normal file
@@ -0,0 +1,271 @@
|
||||
# BD FHIR National — Project Manifest & Pre-Flight Checklist
|
||||
|
||||
**Project:** BD Core FHIR National Repository and Validation Engine
|
||||
**IG Version:** BD Core FHIR IG v0.2.1
|
||||
**FHIR Version:** R4 (4.0.1)
|
||||
**HAPI Version:** 7.2.0
|
||||
**Published by:** DGHS/MoHFW Bangladesh
|
||||
**Generated:** 2025
|
||||
|
||||
---
|
||||
|
||||
## Complete file manifest
|
||||
|
||||
### Build and orchestration
|
||||
|
||||
| File | Step | Purpose |
|
||||
|------|------|---------|
|
||||
| `pom.xml` | 1 | Parent Maven POM. HAPI 7.2.0 BOM, Spring Boot 3.2.5, all version pins. |
|
||||
| `hapi-overlay/pom.xml` | 2 | Child module POM. All runtime dependencies. Fat JAR output: `bd-fhir-hapi.jar`. |
|
||||
| `hapi-overlay/Dockerfile` | 4 | Multi-stage build: Maven builder + eclipse-temurin:17-jre runtime. tini as PID 1. |
|
||||
| `docker-compose.yml` | 4 | Production orchestration: HAPI, 2× PostgreSQL, 2× pgBouncer, nginx. Scaling roadmap in comments. |
|
||||
| `.env.example` | 4 | Environment variable template. Copy to `.env`, fill secrets, `chmod 600`. |
|
||||
|
||||
### Database
|
||||
|
||||
| File | Step | Purpose |
|
||||
|------|------|---------|
|
||||
| `hapi-overlay/src/main/resources/db/migration/fhir/V1__hapi_schema.sql` | 3 | HAPI 7.2.0 JPA schema. All tables, sequences, indexes. Flyway-managed. Partition comments at 10M+ rows. |
|
||||
| `hapi-overlay/src/main/resources/db/migration/audit/V2__audit_schema.sql` | 3 | Audit schema. Partitioned `audit_events` and `fhir_rejected_submissions` by month 2025-2027. INSERT-only role grants. `create_next_month_partitions()` maintenance function. |
|
||||
| `postgres/fhir/postgresql.conf` | 4 | PostgreSQL 15 tuning for HAPI JPA workload. 2GB container. SSD-optimised. |
|
||||
| `postgres/audit/postgresql.conf` | 4 | PostgreSQL 15 tuning for audit INSERT workload. 1GB container. |
|
||||
| `postgres/fhir/init.sql` | 4 | Template — **replace with `init.sh`** per deployment-guide.md §1.6 before first deploy. |
|
||||
| `postgres/audit/init.sql` | 4 | Template — **replace with `init.sh`** per deployment-guide.md §1.6 before first deploy. |
|
||||
|
||||
### Application configuration
|
||||
|
||||
| File | Step | Purpose |
|
||||
|------|------|---------|
|
||||
| `hapi-overlay/src/main/resources/application.yaml` | 5 | Complete Spring Boot + HAPI configuration. Dual datasource, dual Flyway, HAPI R4, validation chain, actuator, structured logging. All secrets via env vars. |
|
||||
| `hapi-overlay/src/main/resources/logback-spring.xml` | 5 | Structured JSON logging via logstash-logback-encoder. Async appenders. MDC field inclusion. |
|
||||
|
||||
### Java source — entry point
|
||||
|
||||
| File | Step | Purpose |
|
||||
|------|------|---------|
|
||||
| `hapi-overlay/src/main/java/bd/gov/dghs/fhir/BdFhirApplication.java` | 12 | Spring Boot entry point. `@EnableAsync` activates audit async executor. |
|
||||
|
||||
### Java source — configuration
|
||||
|
||||
| File | Step | Purpose |
|
||||
|------|------|---------|
|
||||
| `hapi-overlay/src/main/java/bd/gov/dghs/fhir/config/DataSourceConfig.java` | 6 | Dual datasource wiring. Primary FHIR datasource (HikariCP, pgBouncer session mode). Secondary audit datasource (INSERT-only). Dual Flyway instances. `auditDbHealthIndicator` using INSERT test. `oclHealthIndicator`. `entityManagerFactory` bound explicitly to FHIR datasource. |
|
||||
| `hapi-overlay/src/main/java/bd/gov/dghs/fhir/config/FhirServerConfig.java` | 6 | Validation support chain (6 supports in dependency order). `NpmPackageValidationSupport` loading BD Core IG. `RequestValidatingInterceptor` with failOnSeverity=ERROR. `unvalidatedProfileTagInterceptor` for unknown resource types. Startup IG presence check. |
|
||||
| `hapi-overlay/src/main/java/bd/gov/dghs/fhir/config/SecurityConfig.java` | 8 | Registers JWT, validation, and audit interceptors into HAPI RestfulServer in correct order. HTTPS enforcement filter. Security response headers filter. |
|
||||
|
||||
### Java source — initialisation
|
||||
|
||||
| File | Step | Purpose |
|
||||
|------|------|---------|
|
||||
| `hapi-overlay/src/main/java/bd/gov/dghs/fhir/init/IgPackageInitializer.java` | 9 | `InitializingBean` that loads BD Core IG with PostgreSQL advisory lock. Prevents multi-replica NPM_PACKAGE race condition. djb2 hash for stable lock key. |
|
||||
|
||||
### Java source — interceptors
|
||||
|
||||
| File | Step | Purpose |
|
||||
|------|------|---------|
|
||||
| `hapi-overlay/src/main/java/bd/gov/dghs/fhir/interceptor/KeycloakJwtInterceptor.java` | 8 | Nimbus JOSE+JWT with `RemoteJWKSet` (1-hour TTL, kid-based refresh). Validates: signature, expiry, issuer, `mci-api` role. Extracts: `client_id`, `subject`, `sending_facility`. Sets all `REQUEST_ATTR_*` constants. MDC population and guaranteed cleanup. `GET /fhir/metadata` and actuator health exempt. |
|
||||
| `hapi-overlay/src/main/java/bd/gov/dghs/fhir/interceptor/AuditEventInterceptor.java` | 9 | Three-hook interceptor: (1) cluster expression pre-validation, (2) accepted resource audit at `STORAGE_PRESTORAGE_*`, (3) rejected resource audit at `SERVER_HANDLE_EXCEPTION`. Routes to `AuditEventEmitter` and `RejectedSubmissionSink` asynchronously. |
|
||||
|
||||
### Java source — terminology
|
||||
|
||||
| File | Step | Purpose |
|
||||
|------|------|---------|
|
||||
| `hapi-overlay/src/main/java/bd/gov/dghs/fhir/terminology/BdTerminologyValidationSupport.java` | 7 | Custom `IValidationSupport`. Forces `$validate-code` for ICD-11. Suppresses `$expand` via `isValueSetSupported()=false`. 24-hour `ConcurrentHashMap` cache with TTL eviction. Retry with exponential backoff. Fail-open on OCL outage. `flushCache()` called by `TerminologyCacheManager`. |
|
||||
| `hapi-overlay/src/main/java/bd/gov/dghs/fhir/terminology/TerminologyCacheManager.java` | 7 | REST controller: `DELETE /admin/terminology/cache` and `GET /admin/terminology/cache/stats`. Requires `fhir-admin` role (read from `REQUEST_ATTR_IS_ADMIN`). Called by ICD-11 version upgrade pipeline. |
|
||||
|
||||
### Java source — validator
|
||||
|
||||
| File | Step | Purpose |
|
||||
|------|------|---------|
|
||||
| `hapi-overlay/src/main/java/bd/gov/dghs/fhir/validator/ClusterExpressionValidator.java` | 7 | Detects `icd11-cluster-expression` extension on ICD-11 `Coding` elements. Rejects raw postcoordinated strings (contains `&`, `/`, `%` without extension) with 422. Calls `https://icd11.dghs.gov.bd/cluster/validate` for full expression validation. Fail-open on cluster validator outage. |
|
||||
|
||||
### Java source — audit
|
||||
|
||||
| File | Step | Purpose |
|
||||
|------|------|---------|
|
||||
| `hapi-overlay/src/main/java/bd/gov/dghs/fhir/audit/AuditEventEmitter.java` | 9 | `@Async` INSERT to `audit.audit_events`. Immutable (INSERT only — `audit_writer` role enforces at DB level). Serialises `validationMessages` as JSONB. Truncates fields to column lengths. Logs ERROR on write failure (audit gap is a high-priority incident). |
|
||||
| `hapi-overlay/src/main/java/bd/gov/dghs/fhir/audit/RejectedSubmissionSink.java` | 9 | `@Async` INSERT to `audit.fhir_rejected_submissions`. Stores full resource payload as TEXT (preserves exact bytes). 4MB payload cap (anti-DoS). Machine-readable `rejection_code` for programmatic analysis. |
|
||||
|
||||
### Infrastructure
|
||||
|
||||
| File | Step | Purpose |
|
||||
|------|------|---------|
|
||||
| `nginx/nginx.conf` | 10 | Reverse proxy. TLS 1.2/1.3 only. Rate limiting: FHIR 10r/s, admin 6r/m, metadata 5r/s. `/admin/` restricted to `172.20.0.0/16`. `/actuator/` restricted to internal network. `/fhir/metadata` unauthenticated. All other paths → HAPI. |
|
||||
| `hapi-overlay/src/main/resources/packages/.gitkeep` | 12 | Marks the IG package directory for git. CI pipeline places `bd.gov.dghs.core-{version}.tgz` here before `docker build`. |
|
||||
|
||||
### Operations
|
||||
|
||||
| File | Step | Purpose |
|
||||
|------|------|---------|
|
||||
| `ops/keycloak-setup.md` | 10 | `fhir-admin` role creation. `fhir-admin-pipeline` client setup. Vendor client template. `sending_facility` mapper configuration. Token verification tests. |
|
||||
| `ops/version-upgrade-integration.md` | 10 | ICD-11 upgrade pipeline integration. Pre-flush OCL verification. `get_fhir_admin_token()`, `flush_hapi_terminology_cache()`, `verify_hapi_validates_new_version()` Python functions. `post_ocl_import_hapi_integration()` call site. Rollback procedure. |
|
||||
| `ops/scaling-roadmap.md` | 10 | Phase 1→2→3 thresholds and changes. Monthly partition maintenance cron. PostgreSQL monitoring queries. IG upgrade procedure. Key Prometheus metrics and alert thresholds. |
|
||||
| `ops/deployment-guide.md` | 11 | Step-by-step Ubuntu 22.04 deployment. Docker install, daemon config, registry auth. PostgreSQL init script fix (critical). First-deploy sequence. Nine acceptance tests. Rolling upgrade procedure. Operational runbook. |
|
||||
|
||||
---
|
||||
|
||||
## Pre-flight checklist
|
||||
|
||||
Work through this list top to bottom before running `docker compose up`.
|
||||
Each item is a documented failure mode from real HAPI deployments.
|
||||
**Do not skip items marked CRITICAL.**
|
||||
|
||||
---
|
||||
|
||||
### CI machine (before docker build)
|
||||
|
||||
- [ ] **[CRITICAL]** `bd.gov.dghs.core-0.2.1.tgz` placed in `hapi-overlay/src/main/resources/packages/`
|
||||
*Symptom if missing: startup fails with `STARTUP FAILURE: BD Core IG package not found`. Container will not start.*
|
||||
|
||||
- [ ] `HAPI_IG_PACKAGE_CLASSPATH` in `docker-compose.yml` matches the `.tgz` filename exactly
|
||||
*Symptom if mismatch: same STARTUP FAILURE as above.*
|
||||
|
||||
- [ ] Docker image built with correct `--build-arg` values and pushed to private registry
|
||||
*Verify: `docker manifest inspect your-registry.dghs.gov.bd/bd-fhir-hapi:1.0.0`*
|
||||
|
||||
- [ ] Image tag in `.env.example` (and your `.env`) matches the pushed image tag
|
||||
*Symptom if mismatch: `docker compose pull` pulls wrong image or fails.*
|
||||
|
||||
---
|
||||
|
||||
### Production server (before docker compose up)
|
||||
|
||||
- [ ] **[CRITICAL]** `postgres/fhir/init.sql` replaced with `init.sh` (deployment-guide.md §1.6)
|
||||
*Symptom if skipped: `hapi_app` user is never created. Flyway migrations succeed but HAPI runtime fails with authentication error to postgres-fhir.*
|
||||
|
||||
- [ ] **[CRITICAL]** `postgres/audit/init.sql` replaced with `init.sh` (deployment-guide.md §1.6)
|
||||
*Symptom if skipped: `audit_writer_login` never created. HAPI starts but all audit writes fail with `FATAL: password authentication failed for user "audit_writer_login"`.*
|
||||
|
||||
- [ ] `docker-compose.yml` `postgres-audit` service updated to mount `init.sh` (not `init.sql`) and passes `AUDIT_DB_WRITER_USER/PASSWORD/MAINTAINER_*` env vars
|
||||
*Follows from the init.sh fix above.*
|
||||
|
||||
- [ ] `.env` file created, all `<CHANGE_ME>` values replaced, `chmod 600 .env`
|
||||
*Verify: `grep CHANGE_ME .env` returns no output.*
|
||||
|
||||
- [ ] `TLS_CERT_PATH` and `TLS_KEY_PATH` in `.env` point to files that exist on the server
|
||||
*Verify: `ls -la $(grep TLS_CERT_PATH .env | cut -d= -f2)`*
|
||||
|
||||
- [ ] Server can reach all external services from within the Docker network:
|
||||
```bash
|
||||
# Test from inside a temporary container on the Docker network
|
||||
docker run --rm --network bd-fhir-national_backend-fhir alpine sh -c \
|
||||
"apk add -q curl && curl -s -o /dev/null -w '%{http_code}' \
|
||||
https://auth.dghs.gov.bd/realms/hris/.well-known/openid-configuration"
|
||||
# Expected: 200
|
||||
```
|
||||
*Symptom if unreachable: KeycloakJwtInterceptor fails to fetch JWKS on startup. All authenticated requests return 401 even with valid tokens.*
|
||||
|
||||
- [ ] `random_page_cost` in both `postgresql.conf` files matches your storage type
|
||||
`1.1` for SSD (default in this project), `4.0` for spinning HDD
|
||||
*Symptom if wrong: query planner chooses sequential scans over indexes. FHIR search performance degrades at >100k resources.*
|
||||
|
||||
- [ ] Docker and Docker Compose v2 installed (`docker compose version`, not `docker-compose`)
|
||||
*Symptom if wrong: `docker-compose` (v1) does not support `deploy.replicas` or `condition: service_healthy`.*
|
||||
|
||||
- [ ] Private registry credentials stored in `~/.docker/config.json`
|
||||
*Verify: `docker login your-registry.dghs.gov.bd`*
|
||||
|
||||
---
|
||||
|
||||
### Keycloak (before first vendor submission)
|
||||
|
||||
- [ ] **[CRITICAL]** `fhir-admin` realm role created in `hris` realm (keycloak-setup.md Part 1)
|
||||
*Symptom if missing: `fhir-admin-pipeline` service account has no role to assign. Cache flush endpoint returns 403 for all callers.*
|
||||
|
||||
- [ ] **[CRITICAL]** `fhir-admin-pipeline` client created with `fhir-admin` role assigned (keycloak-setup.md Part 2)
|
||||
*Symptom if missing: version upgrade pipeline cannot flush cache. After ICD-11 upgrade, stale codes accepted/rejected for up to 24 hours.*
|
||||
|
||||
- [ ] At least one vendor client created (`fhir-vendor-TEST-FAC-001` for acceptance testing) with `mci-api` role and `sending_facility` attribute mapper (keycloak-setup.md Parts 3-4)
|
||||
*Symptom if missing: acceptance Test 1 returns 401. All vendor submissions rejected.*
|
||||
|
||||
- [ ] Token from test vendor client decoded and verified to contain:
|
||||
- `iss`: `https://auth.dghs.gov.bd/realms/hris`
|
||||
- `azp`: `fhir-vendor-TEST-FAC-001`
|
||||
- `realm_access.roles`: contains `mci-api`
|
||||
- `sending_facility`: non-empty facility code
|
||||
*Verify with: `echo $TOKEN | cut -d. -f2 | base64 -d 2>/dev/null | jq .`*
|
||||
|
||||
---
|
||||
|
||||
### Post-startup verification
|
||||
|
||||
- [ ] All health indicators GREEN:
|
||||
```bash
|
||||
curl -s http://localhost:8080/actuator/health | jq '.components | keys'
|
||||
# Expected: ["auditDb", "db", "livenessState", "ocl", "readinessState"]
|
||||
# All must show "status": "UP"
|
||||
```
|
||||
|
||||
- [ ] FHIR metadata accessible unauthenticated and shows correct IG version:
|
||||
```bash
|
||||
curl -s https://fhir.dghs.gov.bd/fhir/metadata | jq '.software.version'
|
||||
# Expected: "0.2.1"
|
||||
```
|
||||
|
||||
- [ ] Flyway migration history shows V1 and V2 applied cleanly:
|
||||
```bash
|
||||
docker exec bd-postgres-fhir psql -U postgres -d fhirdb \
|
||||
-c "SELECT version, description, success FROM flyway_schema_history;"
|
||||
# Expected: V1 | hapi_schema | t
|
||||
|
||||
docker exec bd-postgres-audit psql -U postgres -d auditdb \
|
||||
-c "SELECT version, description, success FROM flyway_audit_schema_history;"
|
||||
# Expected: V2 | audit_schema | t
|
||||
```
|
||||
|
||||
- [ ] Audit tables accepting inserts (INSERT-only role working):
|
||||
```bash
|
||||
docker exec bd-postgres-audit psql -U audit_writer_login -d auditdb -c \
|
||||
"INSERT INTO audit.health_check (check_id) VALUES (gen_random_uuid())
|
||||
ON CONFLICT DO NOTHING; SELECT 'audit insert ok';"
|
||||
# Expected: audit insert ok
|
||||
```
|
||||
|
||||
- [ ] **Run all nine acceptance tests** from deployment-guide.md Part 3
|
||||
Tests 1-9 must all produce the expected HTTP status codes before the server is declared production-ready.
|
||||
|
||||
---
|
||||
|
||||
### Operational readiness (before announcing to vendors)
|
||||
|
||||
- [ ] Partition maintenance cron configured on audit database host (scaling-roadmap.md)
|
||||
*Run: `docker exec bd-postgres-audit psql -U postgres -d auditdb -c "SELECT audit.create_next_month_partitions();"` — verify it creates next month without error.*
|
||||
|
||||
- [ ] Log shipping to ELK configured (or Filebeat agent installed and shipping `/app/logs/`)
|
||||
*Minimum: verify logs appear at `docker compose logs hapi` in JSON format.*
|
||||
|
||||
- [ ] `FHIR_ADMIN_CLIENT_SECRET` stored in version upgrade pipeline's secrets vault
|
||||
*Required by `ops/version-upgrade-integration.md` before next ICD-11 release.*
|
||||
|
||||
- [ ] Next ICD-11 version upgrade date noted — cache flush must be coordinated with OCL import completion
|
||||
*See `ops/version-upgrade-integration.md` for the 7-step procedure.*
|
||||
|
||||
- [ ] Vendor onboarding runbook prepared citing `ops/keycloak-setup.md` Parts 3-4
|
||||
*Each new vendor requires: Keycloak client, `mci-api` role, `sending_facility` mapper, credentials delivery.*
|
||||
|
||||
---
|
||||
|
||||
## Architecture decision record — key decisions frozen in this implementation
|
||||
|
||||
The following decisions were finalised through the pre-implementation challenge process
|
||||
and are reflected throughout the codebase. They are not configurable at runtime
|
||||
without code changes.
|
||||
|
||||
| Decision | Rationale | Where enforced |
|
||||
|----------|-----------|---------------|
|
||||
| PostgreSQL only, no H2 | National infrastructure requires production-grade persistence | `DataSourceConfig.java`, Flyway migrations, `docker-compose.yml` |
|
||||
| Validation on ALL requests | No vendor exemptions — uniform HIE boundary | `RequestValidatingInterceptor` with `failOnSeverity=ERROR` |
|
||||
| OCL is single terminology authority | No local ICD-11 copy — live validation | `BdTerminologyValidationSupport`, chain position 6 |
|
||||
| `$expand` failures never cause rejection | Known OCL limitation | `isValueSetSupported()=false`, `expandValueSet()` returns null |
|
||||
| Only `$validate-code` failures cause 422 | Distinguish expansion from validation | `BdTerminologyValidationSupport.validateCode()` |
|
||||
| Keycloak `hris` realm, `mci-api` role, no basic auth | Single authentication authority | `KeycloakJwtInterceptor`, `SecurityConfig` |
|
||||
| Audit log append-only, separate instance | Immutability, forensic separation | `postgres-audit` separate container, `audit_writer` INSERT-only role |
|
||||
| Rejected payloads stored forensically | Vendor debugging, dispute resolution | `RejectedSubmissionSink`, `audit.fhir_rejected_submissions` |
|
||||
| IG bundled in Docker image | Reproducible builds, no runtime URL dependency | `Dockerfile` COPY, `IgPackageInitializer` |
|
||||
| Cluster expressions via extension, not raw code | BD Core IG decided pattern | `ClusterExpressionValidator`, `POSTCOORD_CHARS` rejection |
|
||||
| Fail-open for OCL/cluster validator outages | Service continuity over perfect validation | `BdTerminologyValidationSupport` catch blocks, `ClusterExpressionValidator` catch blocks |
|
||||
| `meta.tag = unvalidated-profile` for unknown types | FHIR-native, queryable, no schema changes | `unvalidatedProfileTagInterceptor` in `FhirServerConfig` |
|
||||
| pgBouncer session mode | Hibernate prepared statement compatibility | `docker-compose.yml` `PGBOUNCER_POOL_MODE: session` |
|
||||
| Flyway bypasses pgBouncer for migrations | DDL transaction safety | `SPRING_FLYWAY_URL` points to `postgres-fhir:5432` directly |
|
||||
| Advisory lock for IG initialisation | Multi-replica startup race prevention | `IgPackageInitializer` djb2 lock key |
|
||||
| Two MDC cleanup hooks | Thread pool MDC leak prevention | `KeycloakJwtInterceptor` `COMPLETED_NORMALLY` + `COMPLETED` |
|
||||
327
ops/scaling-roadmap.md
Normal file
327
ops/scaling-roadmap.md
Normal file
@@ -0,0 +1,327 @@
|
||||
# Scaling Roadmap — BD FHIR National
|
||||
|
||||
**Audience:** DGHS infrastructure team, future system architects
|
||||
**Current phase:** Pilot (Phase 1)
|
||||
|
||||
---
|
||||
|
||||
## Phase thresholds
|
||||
|
||||
| Metric | Phase 1 (Pilot) | Phase 2 (Regional) | Phase 3 (National) |
|
||||
|--------|-----------------|---------------------|---------------------|
|
||||
| Vendors | <50 | <500 | >500 |
|
||||
| Resources/day | <10,000 | <100,000 | >1,000,000 |
|
||||
| Resources total | <1M | <10M | >10M |
|
||||
| HAPI replicas | 1 | 3 | 5-10+ |
|
||||
| Orchestrator | docker-compose | docker-compose | Kubernetes |
|
||||
| PostgreSQL | Single instance | Primary + replica | Patroni HA cluster |
|
||||
| Estimated trigger | Now | 6-18 months | 18-36 months |
|
||||
|
||||
---
|
||||
|
||||
## Phase 1 → Phase 2 changes
|
||||
|
||||
### 1. Scale HAPI replicas to 3
|
||||
|
||||
No configuration changes required — the architecture was designed for this from day one.
|
||||
|
||||
```bash
|
||||
# On the production Ubuntu server
|
||||
cd /opt/bd-fhir-national
|
||||
docker compose --env-file .env up -d --scale hapi=3
|
||||
```
|
||||
|
||||
**Verify after scaling:**
|
||||
```bash
|
||||
# All 3 replicas healthy
|
||||
docker compose ps hapi
|
||||
|
||||
# nginx is load balancing across all 3
|
||||
# (check HAPI logs — requests should appear in all replica logs)
|
||||
docker compose logs --tail=50 hapi
|
||||
|
||||
# pgBouncer pool has sufficient capacity
|
||||
# 3 replicas × 5 HikariCP connections = 15 connections
|
||||
# pgBouncer pool_size=20 — 5 headroom remaining. Acceptable.
|
||||
```
|
||||
|
||||
**pgBouncer adjustment at 5+ replicas:**
|
||||
At 5 replicas (5 × 5 = 25 connections), the current pgBouncer pool_size=20 becomes
|
||||
a bottleneck. Update docker-compose.yml:
|
||||
|
||||
```yaml
|
||||
# pgbouncer-fhir environment:
|
||||
PGBOUNCER_DEFAULT_POOL_SIZE: "30" # was 20
|
||||
# And increase postgres-fhir max_connections in postgresql.conf:
|
||||
# max_connections = 40 # was 30
|
||||
```
|
||||
|
||||
### 2. Add PostgreSQL streaming replication (read replica)
|
||||
|
||||
For read-heavy workloads (FHIR search, bulk export), add a read replica.
|
||||
HAPI supports separate read and write datasource URLs.
|
||||
|
||||
```yaml
|
||||
# Add to docker-compose.yml:
|
||||
postgres-fhir-replica:
|
||||
image: postgres:15-alpine
|
||||
environment:
|
||||
POSTGRES_DB: fhirdb
|
||||
PGUSER: replicator
|
||||
POSTGRES_PASSWORD: ${FHIR_REPLICA_PASSWORD}
|
||||
volumes:
|
||||
- postgres-fhir-replica-data:/var/lib/postgresql/data
|
||||
- ./postgres/fhir/replica.conf:/etc/postgresql/postgresql.conf:ro
|
||||
command: >
|
||||
bash -c "
|
||||
until pg_basebackup -h postgres-fhir -U replicator -D /var/lib/postgresql/data -P -Xs -R; do
|
||||
sleep 5;
|
||||
done && postgres -c config_file=/etc/postgresql/postgresql.conf"
|
||||
networks:
|
||||
- backend-fhir
|
||||
```
|
||||
|
||||
Add `HAPI_DATASOURCE_READ_URL` environment variable pointing to the replica,
|
||||
and update `DataSourceConfig.java` to configure a separate read datasource.
|
||||
|
||||
### 3. Add Redis for distributed JWKS cache
|
||||
|
||||
Currently each HAPI replica maintains an independent in-memory JWKS cache.
|
||||
At 3 replicas, a Keycloak key rotation triggers 3 independent JWKS re-fetches
|
||||
within the same second. This is acceptable. At 10+ replicas, add Redis for
|
||||
a shared JWKS cache to reduce Keycloak load.
|
||||
|
||||
```yaml
|
||||
# Add to docker-compose.yml:
|
||||
redis:
|
||||
image: redis:7-alpine
|
||||
networks:
|
||||
- frontend
|
||||
- backend-fhir
|
||||
command: redis-server --appendonly yes --requirepass ${REDIS_PASSWORD}
|
||||
```
|
||||
|
||||
Update `KeycloakJwtInterceptor` to use Spring Cache with Redis backend for JWKS storage.
|
||||
|
||||
---
|
||||
|
||||
## Phase 2 → Phase 3 changes
|
||||
|
||||
### Move to Kubernetes
|
||||
|
||||
At national scale, docker-compose is not the correct orchestrator. Kubernetes
|
||||
provides:
|
||||
- Horizontal Pod Autoscaler (scale on CPU/RPS automatically)
|
||||
- Rolling deployments (zero-downtime IG version upgrades)
|
||||
- Pod Disruption Budgets (maintain minimum replicas during node maintenance)
|
||||
- Namespace isolation (separate FHIR, audit, monitoring namespaces)
|
||||
|
||||
**Kubernetes equivalents:**
|
||||
|
||||
| docker-compose service | Kubernetes resource |
|
||||
|------------------------|---------------------|
|
||||
| hapi (--scale N) | Deployment + HPA |
|
||||
| postgres-fhir | StatefulSet (or external Patroni) |
|
||||
| postgres-audit | StatefulSet (or external Patroni) |
|
||||
| pgbouncer-fhir | Deployment (sidecar or standalone) |
|
||||
| nginx | Ingress (nginx-ingress-controller) |
|
||||
|
||||
### Partition HAPI JPA tables
|
||||
|
||||
At 5M+ resources in `HFJ_RESOURCE`, evaluate partitioning (see V1 migration comments).
|
||||
|
||||
Prerequisites before partitioning HAPI JPA tables:
|
||||
1. HAPI must be stopped during the migration (ALTER TABLE is not online in PostgreSQL 15)
|
||||
2. Foreign key references to HFJ_RESOURCE from all SPIDX tables must be updated
|
||||
3. The partition key must be included in all primary keys
|
||||
4. Hibernate DDL validation must be disabled during migration, then re-enabled
|
||||
|
||||
This is a planned maintenance window operation — minimum 4-hour downtime window
|
||||
for a database with 5M resources. At 10,000 resources/day, you have
|
||||
approximately 18 months to plan this migration from initial deployment.
|
||||
|
||||
**Trigger:** Run `EXPLAIN ANALYZE` on a representative FHIR search query.
|
||||
When sequential scans on HFJ_RESOURCE appear in the plan despite indexes,
|
||||
partitioning is overdue.
|
||||
|
||||
---
|
||||
|
||||
## Partition maintenance — monthly cron job
|
||||
|
||||
The audit tables are partitioned by month with partitions pre-created through 2027.
|
||||
**A missing partition causes INSERT to fail with a hard error** — no graceful degradation.
|
||||
|
||||
### Setup (run once on the audit PostgreSQL host)
|
||||
|
||||
```bash
|
||||
# Create a login user for the maintenance function
|
||||
# (audit_maintainer_login was created by postgres/audit/init.sh — see deployment-guide.md §1.6; the shipped init.sql template does not create it)
|
||||
|
||||
# Add to crontab on the Ubuntu host (or in a scheduled container):
|
||||
crontab -e
|
||||
|
||||
# Run on the 20th of each month at 00:00 UTC — creates next month's partition
|
||||
0 0 20 * * docker exec bd-postgres-audit psql -U audit_maintainer_login -d auditdb -c "SELECT audit.create_next_month_partitions();" >> /var/log/bd-fhir-partition-maintenance.log 2>&1
# Note: the entry must be a single line — crontab does not support backslash line continuation.
|
||||
```
|
||||
|
||||
### Verify partition creation
|
||||
|
||||
```bash
|
||||
# After the cron runs, verify the new partition exists
|
||||
docker exec bd-postgres-audit psql -U postgres -d auditdb -c "
|
||||
SELECT
|
||||
c.relname AS partition_name,
|
||||
pg_get_expr(c.relpartbound, c.oid) AS partition_range
|
||||
FROM pg_class c
|
||||
JOIN pg_inherits i ON i.inhrelid = c.oid
|
||||
JOIN pg_class p ON p.oid = i.inhparent
|
||||
JOIN pg_namespace n ON n.oid = p.relnamespace
|
||||
WHERE n.nspname = 'audit'
|
||||
AND p.relname = 'audit_events'
|
||||
ORDER BY c.relname DESC
|
||||
LIMIT 3;
|
||||
"
|
||||
# Should show the three most recent monthly partitions
|
||||
```
|
||||
|
||||
### Monitor for missing partitions
|
||||
|
||||
Add this check to your monitoring system (Prometheus alerting or cron):
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# check_audit_partitions.sh
|
||||
# Alert if the next month's partition does not exist by the 25th
|
||||
|
||||
NEXT_MONTH=$(date -d "+1 month" +%Y_%m)
|
||||
PARTITION="audit_events_${NEXT_MONTH}"
|
||||
|
||||
RESULT=$(docker exec bd-postgres-audit psql -U postgres -d auditdb -tAc "
|
||||
SELECT COUNT(*) FROM pg_class c
|
||||
JOIN pg_namespace n ON n.oid = c.relnamespace
|
||||
WHERE n.nspname = 'audit' AND c.relname = '${PARTITION}';")
|
||||
|
||||
if [ "$RESULT" -eq "0" ]; then
|
||||
echo "ALERT: Missing audit partition for next month: ${PARTITION}"
|
||||
# Send to your alerting system (PagerDuty, Slack, email)
|
||||
exit 1
|
||||
fi
|
||||
echo "OK: Partition ${PARTITION} exists"
|
||||
exit 0
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Monitoring — key metrics to track
|
||||
|
||||
These metrics indicate when scaling actions are needed.
|
||||
|
||||
### PostgreSQL — fhir
|
||||
|
||||
```sql
|
||||
-- Connection utilisation (should be <80% of max_connections)
|
||||
SELECT count(*) as active_connections,
|
||||
max_conn,
|
||||
round(100.0 * count(*) / max_conn, 1) as utilisation_pct
|
||||
FROM pg_stat_activity, (SELECT setting::int as max_conn FROM pg_settings WHERE name='max_connections') mc
|
||||
WHERE state = 'active'
|
||||
GROUP BY max_conn;
|
||||
|
||||
-- Table bloat (trigger VACUUM if dead_tuple_ratio > 10%)
|
||||
SELECT relname, n_live_tup, n_dead_tup,
|
||||
round(100.0 * n_dead_tup / NULLIF(n_live_tup + n_dead_tup, 0), 1) as dead_pct
|
||||
FROM pg_stat_user_tables
|
||||
WHERE relname IN ('hfj_resource', 'hfj_spidx_token', 'hfj_res_ver')
|
||||
ORDER BY dead_pct DESC;
|
||||
|
||||
-- Index usage (trigger REINDEX if idx_scan is 0 for a non-new index)
|
||||
SELECT relname, indexrelname, idx_scan, idx_tup_read
|
||||
FROM pg_stat_user_indexes
|
||||
WHERE relname LIKE 'hfj_%'
|
||||
ORDER BY idx_scan ASC
|
||||
LIMIT 10;
|
||||
```
|
||||
|
||||
### PostgreSQL — audit
|
||||
|
||||
```sql
|
||||
-- Partition sizes (plan next archive when any partition exceeds 10GB)
|
||||
SELECT
|
||||
c.relname as partition,
|
||||
pg_size_pretty(pg_relation_size(c.oid)) as size
|
||||
FROM pg_class c
|
||||
JOIN pg_inherits i ON i.inhrelid = c.oid
|
||||
JOIN pg_class p ON p.oid = i.inhparent
|
||||
JOIN pg_namespace n ON n.oid = p.relnamespace
|
||||
WHERE n.nspname = 'audit' AND p.relname = 'audit_events'
|
||||
ORDER BY c.relname DESC;
|
||||
|
||||
-- Rejection rate by vendor (flag vendors with >10% rejection rate)
|
||||
SELECT
|
||||
client_id,
|
||||
COUNT(*) as total_events,
|
||||
SUM(CASE WHEN outcome = 'REJECTED' THEN 1 ELSE 0 END) as rejections,
|
||||
ROUND(100.0 * SUM(CASE WHEN outcome = 'REJECTED' THEN 1 ELSE 0 END) / COUNT(*), 1) as rejection_pct
|
||||
FROM audit.audit_events
|
||||
WHERE event_time > NOW() - INTERVAL '7 days'
|
||||
AND event_type IN ('OPERATION', 'VALIDATION_FAILURE')
|
||||
GROUP BY client_id
|
||||
ORDER BY rejection_pct DESC;
|
||||
```
|
||||
|
||||
### HAPI — Prometheus metrics
|
||||
|
||||
Key metrics exposed at `/actuator/prometheus`:
|
||||
|
||||
| Metric | Alert threshold |
|
||||
|--------|-----------------|
|
||||
| `hikaricp_connections_pending` | >0 for >30s → pool exhaustion |
|
||||
| `hikaricp_connection_timeout_total` | Any increment → pool exhaustion |
|
||||
| `http_server_requests_seconds_max` | >30s → OCL timeout or slow validation |
|
||||
| `jvm_memory_used_bytes / jvm_memory_max_bytes` | >85% → OOM risk, increase container memory |
|
||||
| `process_uptime_seconds` | Resets → unexpected container restart |
|
||||
|
||||
---
|
||||
|
||||
## IG upgrade procedure
|
||||
|
||||
When BD Core IG advances from v0.2.1 to v0.3.0:
|
||||
|
||||
```bash
|
||||
# 1. On CI machine: place new package.tgz in src/main/resources/packages/
|
||||
cp bd.gov.dghs.core-0.3.0.tgz hapi-overlay/src/main/resources/packages/
|
||||
|
||||
# 2. Remove old package (one IG version per image)
|
||||
rm hapi-overlay/src/main/resources/packages/bd.gov.dghs.core-0.2.1.tgz
|
||||
|
||||
# 3. Update application.yaml / docker-compose env vars:
|
||||
# HAPI_IG_PACKAGE_CLASSPATH=classpath:packages/bd.gov.dghs.core-0.3.0.tgz
|
||||
# HAPI_IG_VERSION=0.3.0
|
||||
|
||||
# 4. Build and push new image
|
||||
docker build \
|
||||
--build-arg IG_PACKAGE=bd.gov.dghs.core-0.3.0.tgz \
|
||||
--build-arg BUILD_VERSION=1.1.0 \
|
||||
--build-arg GIT_COMMIT=$(git rev-parse --short HEAD) \
|
||||
-t your-registry.dghs.gov.bd/bd-fhir-hapi:1.1.0 \
|
||||
-f hapi-overlay/Dockerfile .
|
||||
|
||||
docker push your-registry.dghs.gov.bd/bd-fhir-hapi:1.1.0
|
||||
|
||||
# 5. Update HAPI_IMAGE in .env on production server
|
||||
# 6. Rolling redeploy
|
||||
docker compose --env-file .env pull hapi
|
||||
docker compose --env-file .env up -d --no-deps hapi
|
||||
|
||||
# 7. Verify new IG version is active
|
||||
curl -s https://fhir.dghs.gov.bd/fhir/metadata | jq '.software.version'
|
||||
# Expected: "0.3.0" or the configured HAPI_IG_VERSION value
|
||||
```
|
||||
|
||||
**Vendor notification:** IG upgrades that change SHALL constraints require
|
||||
vendor notification at least 30 days in advance. Vendors must test against
|
||||
the staging environment before production deployment.
|
||||
<!-- New file in this commit: ops/technical-operations-document.md (893 lines) -->
|
||||
# BD FHIR National — Technical Operations Document
|
||||
|
||||
**System:** National FHIR R4 Repository and Validation Engine
|
||||
**Published by:** DGHS / MoHFW Bangladesh
|
||||
**IG:** BD Core FHIR IG v0.2.1
|
||||
**HAPI FHIR:** 7.2.0
|
||||
**Stack:** Java 17 · Spring Boot 3.2.5 · PostgreSQL 15 · Docker Compose
|
||||
|
||||
---
|
||||
|
||||
## Table of Contents
|
||||
|
||||
1. [System Purpose and Architecture](#1-system-purpose-and-architecture)
|
||||
2. [Repository Structure](#2-repository-structure)
|
||||
3. [How the System Works](#3-how-the-system-works)
|
||||
4. [Infrastructure Components](#4-infrastructure-components)
|
||||
5. [Security Model](#5-security-model)
|
||||
6. [Validation Pipeline](#6-validation-pipeline)
|
||||
7. [Audit and Forensics](#7-audit-and-forensics)
|
||||
8. [CI/CD Pipeline](#8-cicd-pipeline)
|
||||
9. [First Deployment — Step by Step](#9-first-deployment--step-by-step)
|
||||
10. [Routine Operations](#10-routine-operations)
|
||||
11. [ICD-11 Version Upgrade](#11-icd-11-version-upgrade)
|
||||
12. [Scaling](#12-scaling)
|
||||
13. [Troubleshooting](#13-troubleshooting)
|
||||
14. [Architecture Decisions You Must Not Reverse](#14-architecture-decisions-you-must-not-reverse)
|
||||
|
||||
---
|
||||
|
||||
## 1. System Purpose and Architecture
|
||||
|
||||
This system is the national FHIR R4 repository for Bangladesh. It serves three purposes simultaneously:
|
||||
|
||||
**Repository** — Stores validated FHIR R4 resources submitted by hospitals, clinics, diagnostic labs, and pharmacies (collectively: vendors). No unvalidated resource enters storage.
|
||||
|
||||
**Validation engine** — Every incoming resource is validated against BD Core FHIR IG profiles AND against the national ICD-11 terminology authority (OCL) before storage. Invalid resources are rejected with HTTP 422 and a FHIR OperationOutcome describing exactly what failed.
|
||||
|
||||
**HIE gateway** — Acts as the national Health Information Exchange boundary. The system enforces that only authenticated, authorised, and clinically valid data enters the national record.
|
||||
|
||||
### Traffic flow
|
||||
|
||||
```
|
||||
Vendor system
|
||||
│
|
||||
│ POST /fhir/Condition
|
||||
│ Authorization: Bearer {token}
|
||||
▼
|
||||
Centralised nginx proxy ← TLS termination, routing (managed separately)
|
||||
│
|
||||
▼
|
||||
HAPI server :8080
|
||||
│
|
||||
├─ KeycloakJwtInterceptor ← validates JWT, extracts facility identity
|
||||
├─ ClusterExpressionValidator ← validates ICD-11 cluster expressions
|
||||
├─ RequestValidatingInterceptor ← validates against BD Core IG profiles
|
||||
├─ BdTerminologyValidationSupport ← validates ICD-11 codes against OCL
|
||||
│
|
||||
├─ [ACCEPTED] → HFJ_RESOURCE (postgres-fhir)
|
||||
│ AuditEventEmitter → audit.audit_events (postgres-audit)
|
||||
│
|
||||
└─ [REJECTED] → 422 OperationOutcome to vendor
|
||||
RejectedSubmissionSink → audit.fhir_rejected_submissions (postgres-audit)
|
||||
AuditEventEmitter → audit.audit_events (postgres-audit)
|
||||
```
|
||||
|
||||
### External service dependencies
|
||||
|
||||
| Service | URL | Purpose | Failure behaviour |
|
||||
|---------|-----|---------|-------------------|
|
||||
| Keycloak | `https://auth.dghs.gov.bd/realms/hris` | JWT validation, JWKS | Fail closed — all requests rejected |
|
||||
| OCL | `https://tr.ocl.dghs.gov.bd/api/fhir` | ICD-11 terminology validation | Fail open — resource accepted with audit record |
|
||||
| Cluster validator | `https://icd11.dghs.gov.bd/cluster/validate` | Postcoordinated ICD-11 expressions | Fail open — resource accepted with audit record |
|
||||
|
||||
**Fail-open policy for OCL and cluster validator is deliberate.** Service continuity during external service outages takes precedence over perfect validation coverage. Every fail-open event is recorded in the audit log. OCL or cluster validator outages must be treated as high-priority incidents.
|
||||
|
||||
---
|
||||
|
||||
## 2. Repository Structure
|
||||
|
||||
```
|
||||
bd-fhir-national/
|
||||
├── .env.example ← copy to .env, fill secrets
|
||||
├── docker-compose.yml ← production orchestration
|
||||
├── pom.xml ← parent Maven POM, version pins
|
||||
├── hapi-overlay/
|
||||
│ ├── Dockerfile ← multi-stage build
|
||||
│ ├── pom.xml ← runtime dependencies
|
||||
│ └── src/main/
|
||||
│ ├── java/bd/gov/dghs/fhir/
|
||||
│ │ ├── BdFhirApplication.java ← Spring Boot entry point
|
||||
│ │ ├── audit/
|
||||
│ │ │ ├── AuditEventEmitter.java ← async INSERT to audit_events
|
||||
│ │ │ └── RejectedSubmissionSink.java ← async INSERT to rejected_submissions
|
||||
│ │ ├── config/
|
||||
│ │ │ ├── DataSourceConfig.java ← dual datasource, dual Flyway
|
||||
│ │ │ ├── FhirServerConfig.java ← validation chain, IG loading
|
||||
│ │ │ └── SecurityConfig.java ← interceptor registration
|
||||
│ │ ├── init/
|
||||
│ │ │ └── IgPackageInitializer.java ← advisory lock IG loader
|
||||
│ │ ├── interceptor/
|
||||
│ │ │ ├── AuditEventInterceptor.java ← audit hook
|
||||
│ │ │ └── KeycloakJwtInterceptor.java ← JWT auth
|
||||
│ │ ├── terminology/
|
||||
│ │ │ ├── BdTerminologyValidationSupport.java ← OCL integration
|
||||
│ │ │ └── TerminologyCacheManager.java ← cache flush endpoint
|
||||
│ │ └── validator/
|
||||
│ │ └── ClusterExpressionValidator.java ← cluster expression check
|
||||
│ └── resources/
|
||||
│ ├── application.yaml ← all Spring/HAPI configuration
|
||||
│ ├── logback-spring.xml ← structured JSON logging
|
||||
│ ├── db/migration/
|
||||
│ │ ├── fhir/V1__hapi_schema.sql ← HAPI JPA schema (Flyway)
|
||||
│ │ └── audit/V2__audit_schema.sql ← audit schema (Flyway)
|
||||
│ └── packages/
|
||||
│ └── .gitkeep ← CI places IG .tgz here
|
||||
├── ops/
|
||||
│ ├── deployment-guide.md
|
||||
│ ├── keycloak-setup.md
|
||||
│ ├── project-manifest.md
|
||||
│ ├── scaling-roadmap.md
|
||||
│ └── version-upgrade-integration.md
|
||||
└── postgres/
|
||||
├── fhir/
|
||||
│ ├── init.sql ← template only — replace with init.sh before deploy
|
||||
│ └── postgresql.conf ← PostgreSQL tuning for HAPI workload
|
||||
└── audit/
|
||||
├── init.sql ← template only — replace with init.sh before deploy
|
||||
└── postgresql.conf ← PostgreSQL tuning for audit workload
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 3. How the System Works
|
||||
|
||||
### Startup sequence
|
||||
|
||||
When a HAPI container starts, the following happens in order. If any step fails, the container exits and Docker restarts it.
|
||||
|
||||
1. **Flyway — FHIR schema** runs `V1__hapi_schema.sql` against `postgres-fhir` using the superuser credential. Creates all HAPI JPA tables, sequences, and indexes. Skipped if already applied.
|
||||
2. **Flyway — Audit schema** runs `V2__audit_schema.sql` against `postgres-audit`. Creates partitioned `audit_events` and `fhir_rejected_submissions` tables with monthly partitions pre-created through 2027. Skipped if already applied.
|
||||
3. **Hibernate validation** checks that the schema exactly matches HAPI's entity mappings (`ddl-auto: validate`). Fails loudly if tables are missing or wrong.
|
||||
4. **IgPackageInitializer** acquires a PostgreSQL advisory lock on `postgres-fhir`, loads the BD Core IG package from the classpath into HAPI's `NpmPackageValidationSupport`, writes metadata to `NPM_PACKAGE` tables, and releases the lock. The advisory lock prevents race conditions when multiple replicas start simultaneously — only one replica writes the metadata row; subsequent replicas find it already present and skip.
|
||||
5. **KeycloakJwtInterceptor** fetches the Keycloak JWKS endpoint and caches the signing keys. If Keycloak is unreachable at startup, the interceptor fails to initialise and the container exits.
|
||||
6. Server begins accepting traffic.
|
||||
|
||||
### Request lifecycle — accepted resource
|
||||
|
||||
```
|
||||
1. KeycloakJwtInterceptor
|
||||
└─ extracts Bearer token from Authorization header
|
||||
└─ verifies signature against cached Keycloak JWKS
|
||||
└─ verifies exp, iss = https://auth.dghs.gov.bd/realms/hris
|
||||
└─ verifies mci-api role present in realm_access or resource_access
|
||||
└─ extracts client_id, sub, sending_facility
|
||||
└─ sets request attributes, populates MDC for log correlation
|
||||
|
||||
2. AuditEventInterceptor (pre-validation hook)
|
||||
└─ invokes ClusterExpressionValidator
|
||||
└─ scans Coding elements with system = http://id.who.int/icd/release/11/mms
|
||||
└─ if icd11-cluster-expression extension present → calls cluster validator middleware
|
||||
└─ if raw postcoordination chars (&, /, %) in code without extension → rejects immediately
|
||||
|
||||
3. RequestValidatingInterceptor
|
||||
└─ runs FhirInstanceValidator against ValidationSupportChain:
|
||||
1. DefaultProfileValidationSupport (base FHIR R4 profiles)
|
||||
2. CommonCodeSystemsTerminologyService (UCUM, MimeType, etc.)
|
||||
3. SnapshotGeneratingValidationSupport (differential → snapshot)
|
||||
4. InMemoryTerminologyServerValidationSupport (cache layer)
|
||||
5. NpmPackageValidationSupport (BD Core IG profiles)
|
||||
6. BdTerminologyValidationSupport (OCL $validate-code for ICD-11)
|
||||
└─ any ERROR severity issue → throws UnprocessableEntityException → 422
|
||||
|
||||
4. HAPI JPA persistence
|
||||
└─ resource written to HFJ_RESOURCE, HFJ_RES_VER, SPIDX tables
|
||||
|
||||
5. AuditEventInterceptor (post-storage hook)
|
||||
└─ async: INSERT into audit.audit_events (outcome = ACCEPTED)
|
||||
|
||||
6. HTTP 201 Created → vendor
|
||||
```
|
||||
|
||||
### Request lifecycle — rejected resource
|
||||
|
||||
```
|
||||
1-3. Same as above up to validation failure
|
||||
|
||||
4. UnprocessableEntityException thrown with FHIR OperationOutcome
|
||||
|
||||
5. AuditEventInterceptor (exception hook)
|
||||
└─ async: INSERT full payload into audit.fhir_rejected_submissions
|
||||
└─ async: INSERT into audit.audit_events (outcome = REJECTED)
|
||||
|
||||
6. HTTP 422 Unprocessable Entity → vendor
|
||||
Body: OperationOutcome with issue[].diagnostics and issue[].expression
|
||||
```
|
||||
|
||||
### ICD-11 terminology validation detail
|
||||
|
||||
`BdTerminologyValidationSupport` intercepts every call to validate an ICD-11 coded element:
|
||||
|
||||
1. **Cache check** — if the code was validated in the last 24 hours, serve result from `ConcurrentHashMap`. No OCL call.
|
||||
2. **Cache miss** — call OCL `$validate-code` with `system=http://id.who.int/icd/release/11/mms`. For `Condition.code`, include `url=https://fhir.dghs.gov.bd/core/ValueSet/bd-condition-icd11-diagnosis-valueset` to enforce the Diagnosis + Finding class restriction.
|
||||
3. **OCL returns result=true** — cache as valid, return valid to chain.
|
||||
4. **OCL returns result=false** — cache as invalid, return error to chain → 422.
|
||||
5. **OCL timeout or 5xx** — log WARN, return null (not supported) — fail open.
|
||||
6. **`$expand` attempts** — `isValueSetSupported()` returns false for ICD-11 ValueSets. `$expand` is never attempted. This is intentional: OCL does not support `$expand`.
|
||||
|
||||
---
|
||||
|
||||
## 4. Infrastructure Components
|
||||
|
||||
### Docker services
|
||||
|
||||
| Service | Image | Purpose | Networks |
|
||||
|---------|-------|---------|----------|
|
||||
| `hapi` | Private registry | HAPI FHIR application | frontend, backend-fhir, backend-audit |
|
||||
| `postgres-fhir` | postgres:15-alpine | FHIR resource store | backend-fhir |
|
||||
| `postgres-audit` | postgres:15-alpine | Immutable audit store | backend-audit |
|
||||
| `pgbouncer-fhir` | bitnami/pgbouncer:1.22.1 | Connection pool → postgres-fhir | backend-fhir |
|
||||
| `pgbouncer-audit` | bitnami/pgbouncer:1.22.1 | Connection pool → postgres-audit | backend-audit |
|
||||
|
||||
### Network isolation
|
||||
|
||||
`backend-fhir` and `backend-audit` are marked `internal: true` — no external internet access from these networks. The database containers cannot reach external services and external services cannot reach the databases directly.
|
||||
|
||||
### pgBouncer configuration
|
||||
|
||||
Both pgBouncer instances run in **session mode**. This is mandatory. HAPI uses Hibernate which relies on prepared statements — transaction mode pgBouncer breaks these. Do not change the pool mode.
|
||||
|
||||
Pool sizing at pilot phase (1 HAPI replica):
|
||||
|
||||
| Pool | HikariCP max per replica | pgBouncer pool_size | PostgreSQL max_connections |
|
||||
|------|--------------------------|--------------------|-----------------------------|
|
||||
| FHIR | 5 | 20 | 30 |
|
||||
| Audit | 2 | 10 | 20 |
|
||||
|
||||
At 3 replicas: 15 FHIR connections, 6 audit connections — both within pool limits.
|
||||
|
||||
### Databases
|
||||
|
||||
**postgres-fhir** contains all HAPI JPA tables. Schema managed by Flyway `V1__hapi_schema.sql`. `ddl-auto: validate` means Hibernate never modifies the schema — Flyway owns all DDL. If a HAPI upgrade requires schema changes, write a new Flyway migration.
|
||||
|
||||
**postgres-audit** contains the audit schema only. Two tables, both partitioned by month. Schema managed by Flyway `V2__audit_schema.sql` against postgres-audit (separate Flyway instance, separate history table `flyway_audit_schema_history`).
|
||||
|
||||
### Volumes
|
||||
|
||||
| Volume | Contents | Backup priority |
|
||||
|--------|----------|-----------------|
|
||||
| `postgres-fhir-data` | All FHIR resources | Critical — primary data |
|
||||
| `postgres-audit-data` | All audit records, rejected payloads | Critical — forensic/legal |
|
||||
| `hapi-logs` | Structured JSON application logs | Medium — operational |
|
||||
|
||||
---
|
||||
|
||||
## 5. Security Model
|
||||
|
||||
### Authentication
|
||||
|
||||
Every request to FHIR endpoints (except `GET /fhir/metadata` and `/actuator/health/**`) requires a valid Bearer token issued by Keycloak realm `hris`.
|
||||
|
||||
`KeycloakJwtInterceptor` performs these checks in order, rejecting with HTTP 401 on any failure:
|
||||
|
||||
1. `Authorization: Bearer` header present and non-empty
|
||||
2. JWT signature valid against Keycloak JWKS (`RS256` only — symmetric algorithms rejected)
|
||||
3. `exp` claim in the future (not expired)
|
||||
4. `iss` claim exactly equals `https://auth.dghs.gov.bd/realms/hris`
|
||||
5. `mci-api` role present in `realm_access.roles` OR in `resource_access.{client-id}.roles`
|
||||
|
||||
The JWKS is cached locally with a 1-hour TTL. On receiving a JWT with an unknown `kid`, the JWKS is immediately re-fetched regardless of TTL — this handles Keycloak key rotation without delay.
|
||||
|
||||
### Authorisation
|
||||
|
||||
**Vendors** — must have `mci-api` role. Client naming convention: `fhir-vendor-{organisation-id}`.
|
||||
|
||||
**Admin operations** (cache flush endpoint) — must have `fhir-admin` role. Only the `fhir-admin-pipeline` service account and DGHS system administrators hold this role.
|
||||
|
||||
### Keycloak client setup for new vendors
|
||||
|
||||
See `ops/keycloak-setup.md` for the full procedure. Summary:
|
||||
|
||||
1. Create client `fhir-vendor-{org-id}` in `hris` realm — confidential, service accounts enabled, standard flow off.
|
||||
2. Assign `mci-api` role to the service account.
|
||||
3. Add `sending_facility` user attribute with the DGHS facility code.
|
||||
4. Add a User Attribute token mapper for `sending_facility` → token claim `sending_facility`.
|
||||
5. Deliver `client_id` and `client_secret` to the vendor.
|
||||
|
||||
If a vendor token is missing the `sending_facility` claim, HAPI logs WARN on every submission and uses `client_id` as the facility identifier in audit records. This is a data quality issue — configure the mapper.
|
||||
|
||||
### Vendor token flow
|
||||
|
||||
```bash
|
||||
# Vendor obtains token
|
||||
POST https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/token
|
||||
grant_type=client_credentials
|
||||
client_id=fhir-vendor-{org-id}
|
||||
client_secret={secret}
|
||||
→ { "access_token": "eyJ...", "expires_in": 300 }
|
||||
|
||||
# Vendor submits resource
|
||||
POST https://fhir.dghs.gov.bd/fhir/Condition
|
||||
Authorization: Bearer eyJ...
|
||||
Content-Type: application/fhir+json
|
||||
{ ... }
|
||||
```
|
||||
|
||||
Tokens expire in 5 minutes (Keycloak default). Vendor systems must refresh before expiry.
|
||||
|
||||
---
|
||||
|
||||
## 6. Validation Pipeline
|
||||
|
||||
### BD Core IG profiles
|
||||
|
||||
The following resource types are validated against BD Core IG profiles:
|
||||
|
||||
| Resource type | Profile URL |
|
||||
|---------------|-------------|
|
||||
| Patient | `https://fhir.dghs.gov.bd/core/StructureDefinition/bd-patient` |
|
||||
| Condition | `https://fhir.dghs.gov.bd/core/StructureDefinition/bd-condition` |
|
||||
| Encounter | `https://fhir.dghs.gov.bd/core/StructureDefinition/bd-encounter` |
|
||||
| Observation | `https://fhir.dghs.gov.bd/core/StructureDefinition/bd-observation` |
|
||||
| Practitioner | `https://fhir.dghs.gov.bd/core/StructureDefinition/bd-practitioner` |
|
||||
| Organization | `https://fhir.dghs.gov.bd/core/StructureDefinition/bd-organization` |
|
||||
| Location | `https://fhir.dghs.gov.bd/core/StructureDefinition/bd-location` |
|
||||
| Medication | `https://fhir.dghs.gov.bd/core/StructureDefinition/bd-medication` |
|
||||
| MedicationRequest | `https://fhir.dghs.gov.bd/core/StructureDefinition/bd-medicationrequest` |
|
||||
| Immunization | `https://fhir.dghs.gov.bd/core/StructureDefinition/bd-immunization` |
|
||||
|
||||
Resources of any other type are stored with `meta.tag = https://fhir.dghs.gov.bd/tags|unvalidated-profile`. They are not rejected. They can be queried with `_tag=https://fhir.dghs.gov.bd/tags|unvalidated-profile`.
|
||||
|
||||
### ICD-11 cluster expression format
|
||||
|
||||
BD Core IG defines a specific pattern for postcoordinated ICD-11 expressions. **Raw postcoordinated strings in `Coding.code` are prohibited.**
|
||||
|
||||
**Correct format:**
|
||||
```json
|
||||
"code": {
|
||||
"coding": [{
|
||||
"system": "http://id.who.int/icd/release/11/mms",
|
||||
"code": "1C62.0",
|
||||
"extension": [{
|
||||
"url": "icd11-cluster-expression",
|
||||
"valueString": "1C62.0/http%3A%2F%2Fid.who.int%2F..."
|
||||
}]
|
||||
}]
|
||||
}
|
||||
```
|
||||
|
||||
**Prohibited format (rejected with 422):**
|
||||
```json
|
||||
"code": {
|
||||
"coding": [{
|
||||
"system": "http://id.who.int/icd/release/11/mms",
|
||||
"code": "1C62.0&has_severity=mild"
|
||||
}]
|
||||
}
|
||||
```
|
||||
|
||||
### Rejection codes
|
||||
|
||||
The `rejection_code` column in `audit.fhir_rejected_submissions` contains one of:
|
||||
|
||||
| Code | Meaning |
|
||||
|------|---------|
|
||||
| `PROFILE_VIOLATION` | Resource violates a BD Core IG SHALL constraint |
|
||||
| `TERMINOLOGY_INVALID_CODE` | ICD-11 code not found in OCL |
|
||||
| `TERMINOLOGY_INVALID_CLASS` | ICD-11 code exists but is not Diagnosis/Finding class |
|
||||
| `CLUSTER_EXPRESSION_INVALID` | Cluster expression failed cluster validator |
|
||||
| `CLUSTER_STEM_MISSING_EXTENSION` | Raw postcoordinated string without extension |
|
||||
| `AUTH_TOKEN_MISSING` | No Bearer token |
|
||||
| `AUTH_TOKEN_EXPIRED` | Token `exp` in the past |
|
||||
| `AUTH_TOKEN_INVALID_SIGNATURE` | Signature verification failed |
|
||||
| `AUTH_TOKEN_MISSING_ROLE` | `mci-api` role absent |
|
||||
| `AUTH_TOKEN_INVALID_ISSUER` | `iss` does not match Keycloak realm |
|
||||
|
||||
---
|
||||
|
||||
## 7. Audit and Forensics
|
||||
|
||||
### Two audit stores
|
||||
|
||||
**`audit.audit_events`** — one row per request outcome. Always written, accepted and rejected. Contains: `event_type`, `operation`, `resource_type`, `resource_id`, `outcome`, `outcome_detail`, `sending_facility`, `client_id`, `subject`, `request_ip`, `request_id`, `validation_messages` (JSONB).
|
||||
|
||||
**`audit.fhir_rejected_submissions`** — one row per rejected write. Contains: full resource payload as submitted (TEXT, not JSONB), `rejection_code`, `rejection_reason`, `element_path`, `violated_profile`, `invalid_code`, `invalid_system`.
|
||||
|
||||
### Immutability
|
||||
|
||||
The `audit_writer_login` PostgreSQL user has INSERT only on the audit schema. The HAPI JVM connects to postgres-audit as this user. No UPDATE or DELETE is possible from the application layer regardless of what the application code attempts. Only a PostgreSQL superuser can modify audit records.
|
||||
|
||||
### Partitioning
|
||||
|
||||
Both audit tables are partitioned by month (`PARTITION BY RANGE (event_time)`). Monthly partitions are pre-created through December 2027. A cron job must create next-month partitions on the 20th of each month. If this lapses, INSERT fails with a hard error.
|
||||
|
||||
**Set up the cron job immediately after first deployment:**
|
||||
```bash
|
||||
# On the host running postgres-audit
|
||||
crontab -e
|
||||
# Add:
|
||||
0 0 20 * * docker exec bd-postgres-audit psql -U audit_maintainer_login -d auditdb \
|
||||
-c "SELECT audit.create_next_month_partitions();" \
|
||||
>> /var/log/bd-fhir-partition-maintenance.log 2>&1
|
||||
```
|
||||
|
||||
### Useful audit queries
|
||||
|
||||
```sql
|
||||
-- Rejection rate by vendor, last 7 days
|
||||
SELECT client_id,
|
||||
COUNT(*) AS total,
|
||||
SUM(CASE WHEN outcome='REJECTED' THEN 1 ELSE 0 END) AS rejected,
|
||||
ROUND(100.0 * SUM(CASE WHEN outcome='REJECTED' THEN 1 ELSE 0 END) / COUNT(*), 1) AS pct
|
||||
FROM audit.audit_events
|
||||
WHERE event_time > NOW() - INTERVAL '7 days'
|
||||
AND event_type IN ('OPERATION','VALIDATION_FAILURE')
|
||||
GROUP BY client_id ORDER BY pct DESC;
|
||||
|
||||
-- Retrieve rejected payloads for a vendor
|
||||
SELECT submission_time, resource_type, rejection_code, rejection_reason, element_path
|
||||
FROM audit.fhir_rejected_submissions
|
||||
WHERE client_id = 'fhir-vendor-{org-id}'
|
||||
ORDER BY submission_time DESC LIMIT 20;
|
||||
|
||||
-- Auth failures
|
||||
SELECT event_time, client_id, outcome_detail, request_ip
|
||||
FROM audit.audit_events
|
||||
WHERE event_type = 'AUTH_FAILURE'
|
||||
ORDER BY event_time DESC LIMIT 20;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 8. CI/CD Pipeline
|
||||
|
||||
The production server **never builds**. It only pulls pre-built images from the private registry.
|
||||
|
||||
### CI pipeline steps (on CI machine)
|
||||
|
||||
```bash
|
||||
# 1. Obtain BD Core IG package and place it
|
||||
cp /path/to/bd.gov.dghs.core-0.2.1.tgz \
|
||||
hapi-overlay/src/main/resources/packages/
|
||||
|
||||
# 2. Run tests (TestContainers spins up real PostgreSQL — no H2)
|
||||
mvn test -pl hapi-overlay -am
|
||||
|
||||
# 3. Build Docker image (multi-stage: Maven builder + JRE runtime)
|
||||
docker build \
|
||||
--build-arg IG_PACKAGE=bd.gov.dghs.core-0.2.1.tgz \
|
||||
--build-arg BUILD_VERSION=1.0.0 \
|
||||
--build-arg GIT_COMMIT=$(git rev-parse --short HEAD) \
|
||||
-t your-registry.dghs.gov.bd/bd-fhir-hapi:1.0.0 \
|
||||
-f hapi-overlay/Dockerfile \
|
||||
.
|
||||
|
||||
# 4. Push to private registry
|
||||
docker push your-registry.dghs.gov.bd/bd-fhir-hapi:1.0.0
|
||||
```
|
||||
|
||||
The `packages/` directory must contain exactly one `.tgz` file matching `HAPI_IG_PACKAGE_CLASSPATH` in `.env`. If the directory is empty or the filename does not match, the container fails startup immediately with a clear error message.
|
||||
|
||||
---
|
||||
|
||||
## 9. First Deployment — Step by Step
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Ubuntu 22.04 LTS, minimum 8GB RAM, 4 vCPU, 100GB disk
|
||||
- Outbound HTTPS to Keycloak, OCL, cluster validator, private registry
|
||||
- Docker image already built and pushed (see Section 8)
|
||||
- Keycloak configured (see `ops/keycloak-setup.md`)
|
||||
|
||||
### Step 1 — Install Docker
|
||||
|
||||
```bash
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y ca-certificates curl
|
||||
sudo install -m 0755 -d /etc/apt/keyrings
|
||||
sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg \
|
||||
-o /etc/apt/keyrings/docker.asc
|
||||
sudo chmod a+r /etc/apt/keyrings/docker.asc
|
||||
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] \
|
||||
https://download.docker.com/linux/ubuntu \
|
||||
$(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
|
||||
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y docker-ce docker-ce-cli containerd.io \
|
||||
docker-buildx-plugin docker-compose-plugin
|
||||
sudo usermod -aG docker $USER
|
||||
# log out and back in
|
||||
```
|
||||
|
||||
### Step 2 — Prepare application directory
|
||||
|
||||
```bash
|
||||
sudo mkdir -p /opt/bd-fhir-national
|
||||
sudo chown $USER:$USER /opt/bd-fhir-national
|
||||
# rsync project files from CI/deployment machine (excluding source tree)
|
||||
rsync -avz --exclude='.git' --exclude='hapi-overlay/target' \
|
||||
--exclude='hapi-overlay/src' \
|
||||
./bd-fhir-national/ deploy@server:/opt/bd-fhir-national/
|
||||
```
|
||||
|
||||
### Step 3 — Create .env
|
||||
|
||||
```bash
|
||||
cd /opt/bd-fhir-national
|
||||
cp .env.example .env
|
||||
chmod 600 .env
|
||||
nano .env # fill all <CHANGE_ME> values
|
||||
# verify: grep CHANGE_ME .env should return nothing
|
||||
```
|
||||
|
||||
### Step 4 — Fix init scripts (CRITICAL — do not skip)
|
||||
|
||||
The `postgres/fhir/init.sql` and `postgres/audit/init.sql` files are templates with placeholder passwords. PostgreSQL Docker does not perform variable substitution in `.sql` files. Replace them with `.sh` scripts that read from environment variables.
|
||||
|
||||
```bash
|
||||
# FHIR database init script
|
||||
cat > /opt/bd-fhir-national/postgres/fhir/init.sh <<'EOF'
|
||||
#!/bin/bash
|
||||
set -e
|
||||
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
|
||||
DO \$\$ BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = '${FHIR_DB_APP_USER}') THEN
|
||||
CREATE USER ${FHIR_DB_APP_USER} WITH NOSUPERUSER NOCREATEDB NOCREATEROLE
|
||||
NOINHERIT LOGIN CONNECTION LIMIT 30 PASSWORD '${FHIR_DB_APP_PASSWORD}';
|
||||
END IF;
|
||||
END \$\$;
|
||||
GRANT CONNECT ON DATABASE ${POSTGRES_DB} TO ${FHIR_DB_APP_USER};
|
||||
GRANT USAGE ON SCHEMA public TO ${FHIR_DB_APP_USER};
|
||||
ALTER DEFAULT PRIVILEGES IN SCHEMA public
|
||||
GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO ${FHIR_DB_APP_USER};
|
||||
ALTER DEFAULT PRIVILEGES IN SCHEMA public
|
||||
GRANT USAGE, SELECT ON SEQUENCES TO ${FHIR_DB_APP_USER};
|
||||
EOSQL
|
||||
EOF
|
||||
chmod +x /opt/bd-fhir-national/postgres/fhir/init.sh
|
||||
|
||||
# Audit database init script
|
||||
cat > /opt/bd-fhir-national/postgres/audit/init.sh <<'EOF'
|
||||
#!/bin/bash
|
||||
set -e
|
||||
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
|
||||
DO \$\$ BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = '${AUDIT_DB_WRITER_USER}') THEN
|
||||
CREATE USER ${AUDIT_DB_WRITER_USER} WITH NOSUPERUSER NOCREATEDB NOCREATEROLE
|
||||
NOINHERIT LOGIN CONNECTION LIMIT 20 PASSWORD '${AUDIT_DB_WRITER_PASSWORD}';
|
||||
END IF;
|
||||
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = '${AUDIT_DB_MAINTAINER_USER}') THEN
|
||||
CREATE USER ${AUDIT_DB_MAINTAINER_USER} WITH NOSUPERUSER NOCREATEDB NOCREATEROLE
|
||||
NOINHERIT LOGIN CONNECTION LIMIT 5 PASSWORD '${AUDIT_DB_MAINTAINER_PASSWORD}';
|
||||
END IF;
|
||||
END \$\$;
|
||||
GRANT CONNECT ON DATABASE ${POSTGRES_DB} TO ${AUDIT_DB_WRITER_USER};
|
||||
GRANT CONNECT ON DATABASE ${POSTGRES_DB} TO ${AUDIT_DB_MAINTAINER_USER};
|
||||
EOSQL
|
||||
EOF
|
||||
chmod +x /opt/bd-fhir-national/postgres/audit/init.sh
|
||||
```
|
||||
|
||||
Update `docker-compose.yml` — in both postgres services, change the init volume mount from `.sql` to `.sh`, and pass the necessary env vars to `postgres-audit`:
|
||||
|
||||
```yaml
|
||||
# postgres-fhir volumes: change
|
||||
- ./postgres/fhir/init.sh:/docker-entrypoint-initdb.d/init.sh:ro
|
||||
# add to postgres-fhir environment:
|
||||
FHIR_DB_APP_USER: ${FHIR_DB_APP_USER}
|
||||
FHIR_DB_APP_PASSWORD: ${FHIR_DB_APP_PASSWORD}
|
||||
|
||||
# postgres-audit volumes: change
|
||||
- ./postgres/audit/init.sh:/docker-entrypoint-initdb.d/init.sh:ro
|
||||
# add to postgres-audit environment:
|
||||
AUDIT_DB_WRITER_USER: ${AUDIT_DB_WRITER_USER}
|
||||
AUDIT_DB_WRITER_PASSWORD: ${AUDIT_DB_WRITER_PASSWORD}
|
||||
AUDIT_DB_MAINTAINER_USER: ${AUDIT_DB_MAINTAINER_USER}
|
||||
AUDIT_DB_MAINTAINER_PASSWORD: ${AUDIT_DB_MAINTAINER_PASSWORD}
|
||||
```
|
||||
|
||||
### Step 5 — Registry login
|
||||
|
||||
```bash
|
||||
docker login your-registry.dghs.gov.bd
|
||||
docker compose --env-file .env pull
|
||||
```
|
||||
|
||||
### Step 6 — Start databases
|
||||
|
||||
```bash
|
||||
docker compose --env-file .env up -d postgres-fhir postgres-audit
|
||||
# wait for healthy
|
||||
until docker compose --env-file .env ps postgres-fhir | grep -q "healthy"; do sleep 3; done
|
||||
until docker compose --env-file .env ps postgres-audit | grep -q "healthy"; do sleep 3; done
|
||||
```
|
||||
|
||||
### Step 7 — Verify database users
|
||||
|
||||
```bash
|
||||
docker exec bd-postgres-fhir psql -U postgres -d fhirdb \
|
||||
-c "SELECT rolname FROM pg_roles WHERE rolname='hapi_app';"
|
||||
# Expected: hapi_app
|
||||
|
||||
docker exec bd-postgres-audit psql -U postgres -d auditdb \
|
||||
-c "SELECT rolname FROM pg_roles WHERE rolname IN ('audit_writer_login','audit_maintainer_login');"
|
||||
# Expected: two rows
|
||||
```
|
||||
|
||||
### Step 8 — Start pgBouncer and HAPI
|
||||
|
||||
```bash
|
||||
docker compose --env-file .env up -d pgbouncer-fhir pgbouncer-audit
|
||||
docker compose --env-file .env up -d hapi
|
||||
|
||||
# Follow startup — takes 60-120 seconds
|
||||
docker compose --env-file .env logs -f hapi
|
||||
```
|
||||
|
||||
Expected log sequence:
|
||||
```
|
||||
Running FHIR Flyway migrations... → V1 applied
|
||||
Running Audit Flyway migrations... → V2 applied
|
||||
Advisory lock acquired... → IG loading begins
|
||||
BD Core IG package loaded... → IG ready
|
||||
BdTerminologyValidationSupport initialised...
|
||||
KeycloakJwtInterceptor initialised...
|
||||
HAPI RestfulServer interceptors registered...
|
||||
Tomcat started on port(s): 8080
|
||||
Started BdFhirApplication in XX seconds
|
||||
```
|
||||
|
||||
### Step 9 — Verify health
|
||||
|
||||
```bash
|
||||
# Internal (direct to HAPI)
|
||||
docker exec $(docker compose --env-file .env ps -q hapi | head -1) \
|
||||
curl -s http://localhost:8080/actuator/health | jq .
|
||||
# All components must show status: UP
|
||||
|
||||
# FHIR metadata
|
||||
docker exec $(docker compose --env-file .env ps -q hapi | head -1) \
|
||||
curl -s http://localhost:8080/fhir/metadata | jq '.software'
|
||||
# Expected: { "name": "BD FHIR National Repository", "version": "0.2.1" }
|
||||
```
|
||||
|
||||
### Step 10 — Set up partition maintenance cron
|
||||
|
||||
```bash
|
||||
crontab -e
|
||||
# Add:
|
||||
0 0 20 * * docker exec bd-postgres-audit psql -U audit_maintainer_login -d auditdb \
|
||||
-c "SELECT audit.create_next_month_partitions();" \
|
||||
>> /var/log/bd-fhir-partition-maintenance.log 2>&1
|
||||
```
|
||||
|
||||
### Step 11 — Run acceptance tests
|
||||
|
||||
Run all tests from Section 9.3 of `ops/deployment-guide.md`. All nine must pass before the system is declared production-ready.
|
||||
|
||||
---
|
||||
|
||||
## 10. Routine Operations
|
||||
|
||||
### View logs
|
||||
|
||||
```bash
|
||||
# All services
|
||||
docker compose --env-file .env logs -f
|
||||
|
||||
# HAPI logs as structured JSON
|
||||
docker compose --env-file .env logs -f hapi | jq -R 'try fromjson'
|
||||
|
||||
# Filter for rejections
|
||||
docker compose --env-file .env logs hapi | \
|
||||
jq -R 'try fromjson | select(.message | test("rejected|REJECTED"))'
|
||||
```
|
||||
|
||||
### Deploy a new image version
|
||||
|
||||
```bash
|
||||
# Update image tag in .env
|
||||
nano /opt/bd-fhir-national/.env
|
||||
# Change HAPI_IMAGE to new tag
|
||||
|
||||
# Pull and redeploy
|
||||
docker compose --env-file .env pull hapi
|
||||
docker compose --env-file .env up -d --no-deps hapi
|
||||
|
||||
# Verify startup
|
||||
docker compose --env-file .env logs -f hapi
|
||||
```
|
||||
|
||||
### Scale HAPI replicas
|
||||
|
||||
```bash
|
||||
docker compose --env-file .env up -d --scale hapi=3
|
||||
# No other configuration changes needed at 3 replicas.
|
||||
# pgBouncer pool_size=20 supports up to 4 replicas at HikariCP max=5.
|
||||
# At 5+ replicas: increase PGBOUNCER_DEFAULT_POOL_SIZE and postgres max_connections first.
|
||||
```
|
||||
|
||||
### Restart a service
|
||||
|
||||
```bash
|
||||
docker compose --env-file .env restart hapi
|
||||
docker compose --env-file .env restart postgres-fhir # causes brief HAPI downtime
|
||||
```
|
||||
|
||||
### Full stack restart
|
||||
|
||||
```bash
|
||||
docker compose --env-file .env down
|
||||
docker compose --env-file .env up -d
|
||||
```
|
||||
|
||||
### Check pgBouncer pool status
|
||||
|
||||
```bash
|
||||
docker exec bd-pgbouncer-fhir psql -h localhost -p 5432 -U pgbouncer pgbouncer \
|
||||
-c "SHOW POOLS;"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 11. ICD-11 Version Upgrade
|
||||
|
||||
When a new ICD-11 MMS release is imported into OCL, the HAPI terminology cache becomes stale. The upgrade pipeline must flush the cache after OCL import. Full procedure in `ops/version-upgrade-integration.md`. Summary:
|
||||
|
||||
**Order is mandatory:**
|
||||
1. OCL: import new ICD-11 concepts
|
||||
2. OCL: patch `concept_class` for Diagnosis + Finding
|
||||
3. OCL: repopulate `bd-condition-icd11-diagnosis-valueset`
|
||||
4. OCL: verify `$validate-code` returns correct results for new codes
|
||||
5. HAPI: flush terminology cache
|
||||
6. HAPI: verify new codes validate correctly
|
||||
|
||||
**Step 5 — cache flush:**
|
||||
```bash
|
||||
# Get fhir-admin token
|
||||
ADMIN_TOKEN=$(curl -s -X POST \
|
||||
"https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/token" \
|
||||
-d "grant_type=client_credentials" \
|
||||
-d "client_id=fhir-admin-pipeline" \
|
||||
-d "client_secret=${FHIR_ADMIN_CLIENT_SECRET}" \
|
||||
| jq -r '.access_token')
|
||||
|
||||
# Flush — run from inside Docker network (admin endpoint is network-restricted)
|
||||
docker exec $(docker compose --env-file .env ps -q hapi | head -1) \
|
||||
curl -s -X DELETE \
|
||||
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
|
||||
http://localhost:8080/admin/terminology/cache | jq .
|
||||
# Expected: { "status": "flushed", "entriesEvicted": N }
|
||||
```
|
||||
|
||||
**IG version upgrade** (when BD Core IG advances to a new version):
|
||||
1. Place new `.tgz` in `src/main/resources/packages/`, remove old one.
|
||||
2. Update `HAPI_IG_PACKAGE_CLASSPATH` and `HAPI_IG_VERSION` in `.env`.
|
||||
3. Build and push new Docker image on CI machine.
|
||||
4. Deploy new image on production server.
|
||||
|
||||
---
|
||||
|
||||
## 12. Scaling
|
||||
|
||||
### Current capacity (Phase 1 — Pilot)
|
||||
|
||||
| Metric | Capacity |
|
||||
|--------|----------|
|
||||
| HAPI replicas | 1 |
|
||||
| Vendors | <50 |
|
||||
| Resources/day | <10,000 |
|
||||
| PostgreSQL connections (FHIR) | 5 |
|
||||
| PostgreSQL connections (Audit) | 2 |
|
||||
|
||||
### Scaling to Phase 2 (Regional — up to 500 vendors, 100,000 resources/day)
|
||||
|
||||
```bash
|
||||
# Scale HAPI to 3 replicas — no other changes required
|
||||
docker compose --env-file .env up -d --scale hapi=3
|
||||
```
|
||||
|
||||
Beyond 3 replicas, update pgBouncer pool sizes and PostgreSQL `max_connections` before scaling. See `ops/scaling-roadmap.md` for the full capacity matrix and Phase 3 (national scale → Kubernetes) guidance.
|
||||
|
||||
---
|
||||
|
||||
## 13. Troubleshooting
|
||||
|
||||
### Container not starting
|
||||
|
||||
```bash
|
||||
docker compose --env-file .env logs hapi | tail -50
|
||||
```
|
||||
|
||||
| Log message | Cause | Fix |
|
||||
|-------------|-------|-----|
|
||||
| `STARTUP FAILURE: BD Core IG package not found` | `.tgz` missing from image | Rebuild image with package in `packages/` |
|
||||
| `FHIR Flyway configuration missing` | `SPRING_FLYWAY_*` env vars not set | Check `.env` |
|
||||
| `password authentication failed for user "hapi_app"` | `init.sh` not run or wrong password | Verify Step 4 of deployment, check `.env` passwords |
|
||||
| `Advisory lock acquisition timed out` | Another replica holding lock and crashed mid-init | Check `pg_locks` on postgres-fhir, kill stale lock |
|
||||
| `Connection refused` to Keycloak JWKS | Keycloak unreachable at startup | Check network connectivity, Keycloak health |
|
||||
| `Schema-validation: missing table` | Flyway did not run | Check `SPRING_FLYWAY_*` env vars, check flyway_schema_history table |
|
||||
|
||||
### 401 on all authenticated requests
|
||||
|
||||
```bash
|
||||
# Check JWKS endpoint is reachable from inside the container
|
||||
docker exec $(docker compose --env-file .env ps -q hapi | head -1) \
|
||||
curl -s https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/certs | jq '.keys | length'
|
||||
# Expected: 1 or more keys
|
||||
```
|
||||
|
||||
If JWKS is unreachable, all requests will be rejected with 401 (fail closed). Check firewall rules — the HAPI container must have outbound HTTPS to Keycloak.
|
||||
|
||||
### 422 on all ICD-11 coded submissions
|
||||
|
||||
```bash
|
||||
# Check OCL is reachable
|
||||
docker exec $(docker compose --env-file .env ps -q hapi | head -1) \
|
||||
curl -s -o /dev/null -w "%{http_code}" \
|
||||
"https://tr.ocl.dghs.gov.bd/api/fhir/metadata"
|
||||
# Expected: 200
|
||||
|
||||
# Check a specific code manually
|
||||
docker exec $(docker compose --env-file .env ps -q hapi | head -1) \
|
||||
curl -s "https://tr.ocl.dghs.gov.bd/api/fhir/ValueSet/\$validate-code?\
|
||||
url=https://fhir.dghs.gov.bd/core/ValueSet/bd-condition-icd11-diagnosis-valueset\
|
||||
&system=http://id.who.int/icd/release/11/mms&code=1C62.0" | jq .
|
||||
```
|
||||
|
||||
If OCL is unreachable, the system should be fail-open (codes accepted). If codes are being rejected despite OCL being reachable, check OCL's `$validate-code` response directly.
|
||||
|
||||
### Audit writes failing
|
||||
|
||||
```bash
|
||||
# Check HAPI logs for "AUDIT WRITE FAILED"
|
||||
docker compose --env-file .env logs hapi | grep "AUDIT WRITE FAILED"
|
||||
|
||||
# Check audit datasource health
|
||||
docker exec $(docker compose --env-file .env ps -q hapi | head -1) \
|
||||
curl -s http://localhost:8080/actuator/health | jq '.components.auditDb'
|
||||
```
|
||||
|
||||
### Partition missing (INSERT to audit failing)
|
||||
|
||||
```bash
|
||||
# Check which partitions exist
|
||||
docker exec bd-postgres-audit psql -U postgres -d auditdb -c "
|
||||
SELECT c.relname FROM pg_class c
|
||||
JOIN pg_inherits i ON i.inhrelid = c.oid
|
||||
JOIN pg_class p ON p.oid = i.inhparent
|
||||
JOIN pg_namespace n ON n.oid = p.relnamespace
|
||||
WHERE n.nspname = 'audit' AND p.relname = 'audit_events'
|
||||
ORDER BY c.relname DESC LIMIT 3;"
|
||||
|
||||
# Create missing partition manually
|
||||
docker exec bd-postgres-audit psql -U postgres -d auditdb \
|
||||
-c "SELECT audit.create_next_month_partitions();"
|
||||
```
|
||||
|
||||
### Check disk usage
|
||||
|
||||
```bash
|
||||
docker system df -v
|
||||
df -h /var/lib/docker
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 14. Architecture Decisions You Must Not Reverse
|
||||
|
||||
These decisions are load-bearing. Reversing any of them without fully understanding the consequences will break the system.
|
||||
|
||||
**PostgreSQL only — no H2, not even for tests.**
|
||||
The test suite uses TestContainers to spin up real PostgreSQL 15. H2 is not on the classpath. Using H2 masks database-specific behaviour (advisory locks, partitioning, JSONB) and produces false-green test results.
|
||||
|
||||
**Validation on ALL requests — no vendor exemptions.**
|
||||
The `RequestValidatingInterceptor` runs on every write. There is no per-vendor or per-resource-type bypass. This is the HIE boundary enforcement. A bypass for one vendor breaks the national data quality guarantee for everyone downstream.
|
||||
|
||||
**OCL is the single terminology authority.**
|
||||
There is no local ICD-11 concept store. All ICD-11 validation goes to OCL. This means OCL availability affects HAPI validation quality. Keep OCL healthy. Do not add a local fallback without understanding the implications for version consistency.
|
||||
|
||||
**`$expand` is never attempted for ICD-11 ValueSets.**
|
||||
OCL does not support `$expand`. The `isValueSetSupported()` override returns `false` for all ICD-11 ValueSets. Do not remove this — removing it causes HAPI to attempt `$expand`, receive an empty response, and reject every ICD-11 coded resource regardless of whether the code is valid.
|
||||
|
||||
**pgBouncer must remain in session mode.**
|
||||
Hibernate uses prepared statements and advisory locks. Transaction mode pgBouncer breaks both. Do not change `PGBOUNCER_POOL_MODE` to `transaction`.
|
||||
|
||||
**Flyway owns all DDL — Hibernate never modifies schema.**
|
||||
`ddl-auto: validate` means Hibernate will refuse to start if the schema does not match its entities, but it will never ALTER or CREATE tables. If a HAPI upgrade changes entity mappings, write a Flyway migration. Never change `ddl-auto` to `update` in production.
|
||||
|
||||
**Audit writes are append-only.**
|
||||
The `audit_writer_login` PostgreSQL user has INSERT only. The application cannot UPDATE or DELETE audit records regardless of what the code does. This is enforced at the database level. Do not grant additional privileges to this user.
|
||||
|
||||
**The IG package is bundled in the Docker image.**
|
||||
The `.tgz` is a build-time artifact, not a runtime configuration. There is no hot-reload. An IG upgrade requires a new Docker image build and deployment. This is by design — it ties IG version to container version, making deployments auditable and rollbacks clean.
|
||||
434
ops/version-upgrade-integration.md
Normal file
434
ops/version-upgrade-integration.md
Normal file
@@ -0,0 +1,434 @@
|
||||
# ICD-11 Version Upgrade — HAPI Integration
|
||||
|
||||
**Audience:** ICD-11 Terminology Pipeline team, DGHS FHIR ops
|
||||
**Related:** `version_upgrade.py` (OCL import pipeline)
|
||||
**HAPI endpoint:** `DELETE /admin/terminology/cache`
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
When a new ICD-11 MMS release is imported into OCL, the HAPI server's
|
||||
24-hour terminology validation cache becomes stale. Vendors submitting
|
||||
resources after the import — but before the cache expires — will have their
|
||||
ICD-11 codes validated against the **old** OCL data. New codes from the
|
||||
new release will be incorrectly rejected as invalid (cache miss → OCL hit
|
||||
with old data → cached as invalid). Removed or reclassified codes that were
|
||||
previously valid will continue to be accepted from cache.
|
||||
|
||||
**The cache flush endpoint resolves this.** Calling it after OCL import
|
||||
forces the next validation call for every ICD-11 code to hit OCL directly,
|
||||
repopulating the cache with the new version's data.
|
||||
|
||||
---
|
||||
|
||||
## Step-by-step upgrade procedure
|
||||
|
||||
The following steps must be executed **in this exact order**. Deviating
|
||||
from the order (e.g., flushing before OCL import completes) causes the
|
||||
cache to repopulate with old data and requires a second flush.
|
||||
|
||||
```
|
||||
Step 1 OCL: import new ICD-11 MMS release
|
||||
Step 2 OCL: patch concept_class for Diagnosis + Finding concepts
|
||||
Step 3 OCL: repopulate bd-condition-icd11-diagnosis-valueset collection
|
||||
Step 4 OCL: verify $validate-code returns correct results for new codes
|
||||
Step 5 HAPI: flush terminology cache ← this document
|
||||
Step 6 HAPI: verify validation with new codes
|
||||
Step 7 DGHS: notify vendors of new release
|
||||
```
|
||||
|
||||
Steps 1-4 are handled by `version_upgrade.py`. This document covers
|
||||
Steps 5-6 and the exact integration between the two systems.
|
||||
|
||||
---
|
||||
|
||||
## Step 4 — Pre-flush verification (run before calling HAPI)
|
||||
|
||||
Before flushing the HAPI cache, verify that OCL is serving correct results
|
||||
for the new release. Flushing a cache backed by an incorrect OCL state
|
||||
degrades validation quality.
|
||||
|
||||
### 4a — Verify a new code is valid in OCL
|
||||
|
||||
Pick a code that is **new** in this release (not in the previous release).
|
||||
|
||||
```bash
|
||||
NEW_CODE="XY9Z" # Replace with an actual new code from the release notes
|
||||
|
||||
curl -s "https://tr.ocl.dghs.gov.bd/api/fhir/ValueSet/\$validate-code\
|
||||
?url=https://fhir.dghs.gov.bd/core/ValueSet/bd-condition-icd11-diagnosis-valueset\
|
||||
&system=http://id.who.int/icd/release/11/mms\
|
||||
&code=${NEW_CODE}" | jq '.parameter[] | select(.name=="result") | .valueBoolean'
|
||||
|
||||
# Expected: true
|
||||
```
|
||||
|
||||
### 4b — Verify a Device-class code is rejected by OCL
|
||||
|
||||
Device-class codes must be rejected by the bd-condition-icd11-diagnosis-valueset
|
||||
(which restricts to Diagnosis + Finding only).
|
||||
|
||||
```bash
|
||||
DEVICE_CODE="XA7RE2" # Example Device class code — use an actual one
|
||||
|
||||
curl -s "https://tr.ocl.dghs.gov.bd/api/fhir/ValueSet/\$validate-code\
|
||||
?url=https://fhir.dghs.gov.bd/core/ValueSet/bd-condition-icd11-diagnosis-valueset\
|
||||
&system=http://id.who.int/icd/release/11/mms\
|
||||
&code=${DEVICE_CODE}" | jq '.parameter[] | select(.name=="result") | .valueBoolean'
|
||||
|
||||
# Expected: false
|
||||
```
|
||||
|
||||
### 4c — Verify a deprecated code is invalid
|
||||
|
||||
If this release deprecates or removes any codes, verify they are now rejected.
|
||||
|
||||
```bash
|
||||
DEPRECATED_CODE="..." # From release notes
|
||||
|
||||
curl -s "https://tr.ocl.dghs.gov.bd/api/fhir/ValueSet/\$validate-code\
|
||||
?url=https://fhir.dghs.gov.bd/core/ValueSet/bd-condition-icd11-diagnosis-valueset\
|
||||
&system=http://id.who.int/icd/release/11/mms\
|
||||
&code=${DEPRECATED_CODE}" | jq '.parameter[] | select(.name=="result") | .valueBoolean'
|
||||
|
||||
# Expected: false (if deprecated) or true (if still valid)
|
||||
```
|
||||
|
||||
Do not proceed to Step 5 until all 4a-4c verifications pass.
|
||||
|
||||
---
|
||||
|
||||
## Step 5 — Flush the HAPI terminology cache
|
||||
|
||||
### 5a — Obtain fhir-admin token
|
||||
|
||||
The cache flush endpoint requires the `fhir-admin` Keycloak role.
|
||||
The `fhir-admin-pipeline` client is the designated service account for
|
||||
this operation (see `ops/keycloak-setup.md`, Part 2).
|
||||
|
||||
```python
|
||||
# In version_upgrade.py — add this function
|
||||
|
||||
import requests
|
||||
import json
|
||||
|
||||
KEYCLOAK_TOKEN_URL = "https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/token"
|
||||
FHIR_ADMIN_CLIENT_ID = "fhir-admin-pipeline"
|
||||
FHIR_ADMIN_CLIENT_SECRET = os.environ["FHIR_ADMIN_CLIENT_SECRET"] # from secrets vault
|
||||
HAPI_BASE_URL = "https://fhir.dghs.gov.bd"
|
||||
|
||||
|
||||
def get_fhir_admin_token() -> str:
|
||||
"""Obtain a fhir-admin Bearer token from Keycloak."""
|
||||
response = requests.post(
|
||||
KEYCLOAK_TOKEN_URL,
|
||||
data={
|
||||
"grant_type": "client_credentials",
|
||||
"client_id": FHIR_ADMIN_CLIENT_ID,
|
||||
"client_secret": FHIR_ADMIN_CLIENT_SECRET,
|
||||
},
|
||||
timeout=30,
|
||||
)
|
||||
response.raise_for_status()
|
||||
token_data = response.json()
|
||||
access_token = token_data["access_token"]
|
||||
|
||||
# Verify the token contains fhir-admin role before using it
|
||||
# (parse middle segment of JWT)
|
||||
import base64
|
||||
payload_b64 = access_token.split(".")[1]
|
||||
# Add padding if needed
|
||||
payload_b64 += "=" * (4 - len(payload_b64) % 4)
|
||||
claims = json.loads(base64.b64decode(payload_b64))
|
||||
|
||||
realm_roles = claims.get("realm_access", {}).get("roles", [])
|
||||
if "fhir-admin" not in realm_roles:
|
||||
raise ValueError(
|
||||
f"fhir-admin-pipeline token does not contain fhir-admin role. "
|
||||
f"Roles present: {realm_roles}. "
|
||||
f"Check Keycloak service account role assignment."
|
||||
)
|
||||
|
||||
return access_token
|
||||
```
|
||||
|
||||
### 5b — Check cache state before flush (optional but recommended)
|
||||
|
||||
```python
|
||||
def get_cache_stats(admin_token: str) -> dict:
    """Fetch the current HAPI terminology cache statistics.

    Performs an authenticated GET against the admin stats endpoint and
    returns the parsed JSON body (totalEntries / liveEntries / expiredEntries).

    Args:
        admin_token: Bearer token carrying the fhir-admin role.

    Raises:
        requests.HTTPError: on any non-2xx response.
    """
    stats_url = f"{HAPI_BASE_URL}/admin/terminology/cache/stats"
    auth_header = {"Authorization": f"Bearer {admin_token}"}
    resp = requests.get(stats_url, headers=auth_header, timeout=30)
    resp.raise_for_status()
    return resp.json()
|
||||
|
||||
|
||||
# Usage:
|
||||
stats_before = get_cache_stats(admin_token)
|
||||
print(f"Cache before flush: {stats_before['totalEntries']} entries "
|
||||
f"({stats_before['liveEntries']} live, "
|
||||
f"{stats_before['expiredEntries']} expired)")
|
||||
```
|
||||
|
||||
### 5c — Execute cache flush
|
||||
|
||||
```python
|
||||
def flush_hapi_terminology_cache(admin_token: str) -> dict:
    """
    Flush the HAPI ICD-11 terminology validation cache.

    Must be called AFTER:
      - OCL ICD-11 import is complete
      - concept_class patch is applied
      - bd-condition-icd11-diagnosis-valueset is repopulated
      - $validate-code verified returning correct results

    Returns the flush summary from HAPI.
    Raises PermissionError if the token lacks the fhir-admin role,
    requests.HTTPError on any other failure.
    """
    flush_url = f"{HAPI_BASE_URL}/admin/terminology/cache"
    auth_header = {"Authorization": f"Bearer {admin_token}"}

    # 60s timeout: the flush is processed across all HAPI replicas.
    response = requests.delete(flush_url, headers=auth_header, timeout=60)

    # Surface a targeted error on 403 rather than the generic HTTPError
    # that raise_for_status() would produce.
    if response.status_code == 403:
        raise PermissionError(
            "Cache flush rejected: fhir-admin role not present in token. "
            "Check Keycloak fhir-admin-pipeline service account configuration."
        )
    response.raise_for_status()

    result = response.json()
    print(f"HAPI cache flush completed: {result['entriesEvicted']} entries evicted "
          f"at {result['timestamp']}")
    return result
|
||||
|
||||
|
||||
# Full upgrade function to add to version_upgrade.py:
|
||||
def post_ocl_import_hapi_integration(icd11_version: str) -> None:
    """
    Call after successful OCL import and verification.
    Flushes HAPI cache and verifies the new version validates correctly.

    Args:
        icd11_version: The new ICD-11 version string, e.g. "2025-01"
    """
    print(f"\n=== HAPI integration: ICD-11 {icd11_version} ===")

    # Step 5a — authenticate as the fhir-admin-pipeline service account.
    print("Obtaining fhir-admin token...")
    admin_token = get_fhir_admin_token()
    print("Token obtained.")

    # Step 5b — snapshot the cache so the flush effect is observable.
    stats_before = get_cache_stats(admin_token)
    print(f"Pre-flush cache: {stats_before['totalEntries']} entries")

    # Step 5c — evict every cached validation result.
    print("Flushing HAPI terminology cache...")
    flush_result = flush_hapi_terminology_cache(admin_token)
    print(f"Flush complete: {flush_result['entriesEvicted']} entries evicted")

    # Step 6 — confirm HAPI now validates against the new OCL data.
    verify_hapi_validates_new_version(admin_token, icd11_version)

    print(f"=== HAPI integration complete for ICD-11 {icd11_version} ===\n")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Step 6 — Post-flush verification
|
||||
|
||||
After the flush, verify that HAPI is now validating against the new OCL data.
|
||||
This confirms the end-to-end pipeline from OCL → HAPI cache → vendor validation.
|
||||
|
||||
### 6a — Submit a test Condition with a new ICD-11 code
|
||||
|
||||
The test resource must be submitted by the `fhir-admin-pipeline` client.
|
||||
Note: the admin client has `fhir-admin` role but the FHIR resource endpoints
|
||||
require `mci-api` role. Use a dedicated test vendor client for resource
|
||||
submission, or temporarily assign `mci-api` to the admin client for testing.
|
||||
|
||||
**Recommended approach:** use a dedicated test vendor client
|
||||
(`fhir-vendor-test-pipeline`) with `mci-api` role for post-upgrade verification.
|
||||
|
||||
```python
|
||||
def verify_hapi_validates_new_version(
        admin_token: str, icd11_version: str) -> None:
    """
    Verifies HAPI is now accepting codes from the new ICD-11 version.
    Uses the $validate-code operation directly against HAPI (not resource submission)
    to avoid needing mci-api role on the admin client.

    Note: HAPI's $validate-code endpoint proxies to OCL via the validation chain.
    A successful result confirms the cache was flushed AND OCL is returning
    correct results for the new version.

    Args:
        admin_token: Bearer token from get_fhir_admin_token().
        icd11_version: The new ICD-11 version string, e.g. "2025-01".

    Raises:
        ValueError: if the known-good test code is rejected after the flush.
        requests.HTTPError: on any non-401 HTTP failure.
    """
    # Use a known-valid code from the new release
    # This should be parameterised with the actual new code from release notes
    test_code = get_test_code_for_version(icd11_version)  # implement per release
    valueset_url = (
        "https://fhir.dghs.gov.bd/core/ValueSet/"
        "bd-condition-icd11-diagnosis-valueset"
    )

    response = requests.get(
        f"{HAPI_BASE_URL}/fhir/ValueSet/$validate-code",
        params={
            "url": valueset_url,
            "system": "http://id.who.int/icd/release/11/mms",
            "code": test_code,
        },
        headers={"Authorization": f"Bearer {admin_token}"},
        timeout=30,
    )

    if response.status_code == 401:
        # $validate-code requires mci-api — use a vendor test token here
        print("WARNING: $validate-code requires mci-api role. "
              "Skipping HAPI direct verification. "
              "Verify manually by submitting a test Condition resource.")
        return

    response.raise_for_status()
    result = response.json()

    # Use .get() throughout: a malformed Parameters resource (a parameter
    # missing "name" or "valueBoolean") should fall through to the failure
    # path below, not blow up with an unrelated KeyError.
    parameters = result.get("parameter", [])
    valid = next(
        (p.get("valueBoolean") for p in parameters if p.get("name") == "result"),
        None
    )

    if valid is True:
        print(f"✓ HAPI verification passed: code '{test_code}' "
              f"valid in new ICD-11 {icd11_version}")
    else:
        message = next(
            (p.get("valueString") for p in parameters
             if p.get("name") == "message"),
            "no message"
        )
        raise ValueError(
            f"HAPI verification FAILED: code '{test_code}' rejected after cache flush. "
            f"Message: {message}. "
            f"Check OCL import completed correctly for ICD-11 {icd11_version}."
        )
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Integration into version_upgrade.py — call site
|
||||
|
||||
Add to the end of your main upgrade function, after the OCL verification steps:
|
||||
|
||||
```python
|
||||
def run_upgrade(icd11_version: str) -> None:
    """Main upgrade entry point.

    Executes the mandatory upgrade sequence end to end: OCL import and
    verification (steps 1-4), HAPI cache flush + verification (steps 5-6),
    then vendor notification (step 7). Order is load-bearing — see the
    procedure section above.
    """
    print(f"Starting ICD-11 {icd11_version} upgrade...")

    # Steps 1-4: OCL side (existing implementation).
    import_concepts_to_ocl(icd11_version)          # 1. import concepts
    patch_concept_class(icd11_version)             # 2. Diagnosis + Finding patch
    repopulate_condition_valueset(icd11_version)   # 3. rebuild ValueSet
    verify_ocl_validate_code(icd11_version)        # 4. verify $validate-code

    # Steps 5-6: HAPI side — flush stale cache and verify new codes validate.
    post_ocl_import_hapi_integration(icd11_version)

    # Step 7: tell vendors the new release is live.
    notify_vendors_of_upgrade(icd11_version)

    print(f"ICD-11 {icd11_version} upgrade complete.")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Environment variables required by version_upgrade.py
|
||||
|
||||
Add to your upgrade pipeline's secrets configuration:
|
||||
|
||||
```bash
|
||||
# Keycloak admin client for HAPI cache management
|
||||
FHIR_ADMIN_CLIENT_SECRET=<secret from keycloak-setup.md Part 2>
|
||||
|
||||
# HAPI server base URL
|
||||
HAPI_BASE_URL=https://fhir.dghs.gov.bd
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Rollback procedure
|
||||
|
||||
If post-flush verification fails (HAPI is not accepting new codes):
|
||||
|
||||
1. **Do not re-run the flush** — the cache is already empty, re-flushing has no effect.
|
||||
2. Check OCL directly: `curl https://tr.ocl.dghs.gov.bd/api/fhir/ValueSet/$validate-code?...`
|
||||
3. If OCL is returning wrong results: the OCL import is incomplete. Re-run steps 1-4.
|
||||
4. If OCL is returning correct results but HAPI is not: check HAPI logs for OCL
|
||||
connectivity errors. OCL may have returned HTTP 5xx during the first post-flush
|
||||
validation call, triggering fail-open behaviour.
|
||||
5. After fixing OCL: flush the cache again (it has repopulated with bad data).
|
||||
|
||||
```bash
|
||||
# Emergency manual flush via curl
|
||||
ADMIN_TOKEN=$(curl -s -X POST \
|
||||
"https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/token" \
|
||||
-d "grant_type=client_credentials" \
|
||||
-d "client_id=fhir-admin-pipeline" \
|
||||
-d "client_secret=${FHIR_ADMIN_CLIENT_SECRET}" \
|
||||
| jq -r '.access_token')
|
||||
|
||||
curl -s -X DELETE \
|
||||
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
|
||||
https://fhir.dghs.gov.bd/admin/terminology/cache | jq .
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Cache warm-up after flush
|
||||
|
||||
The HAPI cache repopulates organically as vendors submit resources.
|
||||
There is no pre-warming mechanism. The first vendor submission after a flush
|
||||
for each code will take up to 10 seconds (OCL timeout) rather than sub-millisecond
|
||||
(cache hit). At pilot scale (50 vendors, <36,941 distinct codes in use),
|
||||
this is acceptable.
|
||||
|
||||
At national scale, consider a pre-warming job that submits $validate-code requests
|
||||
for the top-N most frequently submitted ICD-11 codes immediately after the flush.
|
||||
The top-N list is derivable from the `audit.audit_events` table:
|
||||
|
||||
```sql
|
||||
SELECT invalid_code, COUNT(*) as frequency
|
||||
FROM audit.fhir_rejected_submissions
|
||||
WHERE rejection_code = 'TERMINOLOGY_INVALID_CODE'
|
||||
AND submission_time > NOW() - INTERVAL '90 days'
|
||||
GROUP BY invalid_code
|
||||
ORDER BY frequency DESC
|
||||
LIMIT 100;
|
||||
-- Invert: these are rejected codes. Use accepted codes from audit_events instead.
|
||||
|
||||
SELECT
|
||||
(validation_messages ->> 0) as code_info,
|
||||
COUNT(*) as frequency
|
||||
FROM audit.audit_events
|
||||
WHERE outcome = 'ACCEPTED'
|
||||
AND resource_type = 'Condition'
|
||||
AND event_time > NOW() - INTERVAL '90 days'
|
||||
GROUP BY 1
|
||||
ORDER BY frequency DESC
|
||||
LIMIT 200;
|
||||
```
|
||||
318
pom.xml
Normal file
318
pom.xml
Normal file
@@ -0,0 +1,318 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
|
||||
https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<!-- =========================================================
|
||||
BD Core FHIR National Repository — HAPI Overlay Module
|
||||
This module produces the fat JAR that runs in the container.
|
||||
All runtime dependencies declared here.
|
||||
========================================================= -->
|
||||
|
||||
<parent>
|
||||
<groupId>bd.gov.dghs</groupId>
|
||||
<artifactId>bd-fhir-national</artifactId>
|
||||
<version>1.0.0-SNAPSHOT</version>
|
||||
<relativePath>../pom.xml</relativePath>
|
||||
</parent>
|
||||
|
||||
<artifactId>hapi-overlay</artifactId>
|
||||
<packaging>jar</packaging>
|
||||
<name>BD FHIR National — HAPI Overlay</name>
|
||||
<description>
|
||||
Custom HAPI FHIR overlay for the BD national FHIR repository.
|
||||
Includes: Keycloak JWT interceptor, BD Core IG validation chain,
|
||||
OCL terminology integration, cluster expression validator,
|
||||
audit event emitter, and rejected submission sink.
|
||||
</description>
|
||||
|
||||
<dependencies>
|
||||
|
||||
<!-- =======================================================
|
||||
HAPI FHIR CORE — JPA server stack
|
||||
Versions managed by hapi-fhir-bom in parent POM.
|
||||
======================================================= -->
|
||||
|
||||
<!-- JPA server starter — brings in Spring Boot web, JPA,
|
||||
Hibernate, Jackson, and HAPI servlet infrastructure -->
|
||||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-jpaserver-starter</artifactId>
|
||||
<!-- Version from BOM. Do NOT pin version here. -->
|
||||
</dependency>
|
||||
|
||||
<!-- FHIR R4 model classes — Patient, Condition, AuditEvent, etc. -->
|
||||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-structures-r4</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- Validation support framework — IValidationSupport chain -->
|
||||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-validation</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- Validation resources — built-in FHIR R4 profiles and
|
||||
code system content (LOINC, SNOMED stubs, etc.) -->
|
||||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-validation-resources-r4</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- NPM package support — loads the BD Core IG package.tgz via
     NpmPackageValidationSupport.
     NOTE(review): verify this artifact id against the HAPI version in
     the parent BOM — in current HAPI releases
     NpmPackageValidationSupport is shipped inside the
     hapi-fhir-validation module, and an artifact named
     "hapi-fhir-npm-packages" may not exist. -->
|
||||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-npm-packages</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- Remote terminology service — base class for our custom
     BdTerminologyValidationSupport, which forces $validate-code and
     suppresses $expand.
     NOTE(review): verify this artifact id — HAPI's
     RemoteTerminologyServiceValidationSupport is published in the
     hapi-fhir-validation module; "hapi-fhir-terminology" may not be a
     real published artifact. -->
|
||||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-terminology</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- NOTE(review): a second <dependency> on
     ca.uhn.hapi.fhir:hapi-fhir-validation-resources-r4 was declared
     here — an exact duplicate of the "Validation resources" stanza
     above. Maven resolves duplicates to a single artifact but emits
     "duplicate declaration of dependency" warnings on every build, so
     the redundant stanza has been removed. The instance validator
     (FhirInstanceValidator, used by FhirValidator for profile
     validation) is provided by the hapi-fhir-validation dependency
     already declared above. -->
|
||||
|
||||
<!-- =======================================================
|
||||
SPRING BOOT STARTERS
|
||||
Versions managed by spring-boot-dependencies in parent.
|
||||
======================================================= -->
|
||||
|
||||
<!-- Web MVC — embedded Tomcat, DispatcherServlet -->
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-web</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- JPA / Hibernate — HAPI JPA persistence layer -->
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-data-jpa</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- Actuator — /actuator/health, /actuator/info, /actuator/metrics.
|
||||
Health endpoints used by load balancer liveness/readiness probes.
|
||||
Custom AuditDataSourceHealthIndicator registered here. -->
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-actuator</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- Validation (Bean Validation / Hibernate Validator) —
|
||||
used for @Valid on REST controller inputs -->
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-validation</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- =======================================================
|
||||
DATABASE
|
||||
======================================================= -->
|
||||
|
||||
<!-- PostgreSQL JDBC driver — runtime only, not needed at compile -->
|
||||
<dependency>
|
||||
<groupId>org.postgresql</groupId>
|
||||
<artifactId>postgresql</artifactId>
|
||||
<scope>runtime</scope>
|
||||
</dependency>
|
||||
|
||||
<!-- Flyway core — schema migration engine.
|
||||
Runs V1__hapi_schema.sql and V2__audit_schema.sql on startup
|
||||
before HAPI JPA initialises. -->
|
||||
<dependency>
|
||||
<groupId>org.flywaydb</groupId>
|
||||
<artifactId>flyway-core</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- Flyway PostgreSQL dialect — required for Flyway 10+.
|
||||
Without this artifact, Flyway silently skips migrations
|
||||
against PostgreSQL datasources. -->
|
||||
<dependency>
|
||||
<groupId>org.flywaydb</groupId>
|
||||
<artifactId>flyway-database-postgresql</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- HikariCP — connection pool.
|
||||
Spring Boot auto-configures HikariCP when it is on classpath.
|
||||
Explicit declaration ensures version alignment with parent BOM. -->
|
||||
<dependency>
|
||||
<groupId>com.zaxxer</groupId>
|
||||
<artifactId>HikariCP</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- =======================================================
|
||||
SECURITY — JWT VALIDATION
|
||||
======================================================= -->
|
||||
|
||||
<!-- Nimbus JOSE+JWT — JWT parsing, signature verification,
|
||||
and JWKS remote key set with cache.
|
||||
Used by KeycloakJwtInterceptor.
|
||||
RemoteJWKSet provides kid-based cache invalidation:
|
||||
keys cached 1 hour, re-fetched on unknown kid. -->
|
||||
<dependency>
|
||||
<groupId>com.nimbusds</groupId>
|
||||
<artifactId>nimbus-jose-jwt</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- =======================================================
|
||||
HTTP CLIENT — OCL and cluster validator calls
|
||||
======================================================= -->
|
||||
|
||||
<!-- Apache HttpClient 5 — used by BdTerminologyValidationSupport
|
||||
for OCL $validate-code calls, and ClusterExpressionValidator
|
||||
for https://icd11.dghs.gov.bd/cluster/validate calls.
|
||||
Separate from the HttpClient that HAPI uses internally
|
||||
(HAPI uses its own managed instance). -->
|
||||
<dependency>
|
||||
<groupId>org.apache.httpcomponents.client5</groupId>
|
||||
<artifactId>httpclient5</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- =======================================================
|
||||
OBSERVABILITY
|
||||
======================================================= -->
|
||||
|
||||
<!-- Micrometer Prometheus registry — exposes /actuator/prometheus
|
||||
for Prometheus scraping. Optional but included from day one
|
||||
for national-scale observability readiness. -->
|
||||
<dependency>
|
||||
<groupId>io.micrometer</groupId>
|
||||
<artifactId>micrometer-registry-prometheus</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- =======================================================
|
||||
UTILITIES
|
||||
======================================================= -->
|
||||
|
||||
<!-- Jackson — JSON serialisation for audit log payloads,
|
||||
OCL API responses, cluster validator responses.
|
||||
Managed by Spring Boot BOM. -->
|
||||
<dependency>
|
||||
<groupId>com.fasterxml.jackson.core</groupId>
|
||||
<artifactId>jackson-databind</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.fasterxml.jackson.datatype</groupId>
|
||||
<artifactId>jackson-datatype-jsr310</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- SLF4J / Logback — Spring Boot default logging.
|
||||
Logback configured in application.yaml for structured JSON
|
||||
output suitable for ELK ingestion. -->
|
||||
<dependency>
|
||||
<groupId>net.logstash.logback</groupId>
|
||||
<artifactId>logstash-logback-encoder</artifactId>
|
||||
<version>7.4</version>
|
||||
</dependency>
|
||||
|
||||
<!-- =======================================================
|
||||
TEST DEPENDENCIES
|
||||
======================================================= -->
|
||||
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-test</artifactId>
|
||||
<scope>test</scope>
|
||||
<!-- Excludes vintage JUnit 4 engine — JUnit 5 only -->
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>org.junit.vintage</groupId>
|
||||
<artifactId>junit-vintage-engine</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
|
||||
<!-- TestContainers — PostgreSQL container for integration tests.
|
||||
Tests spin up a real PostgreSQL 15 container, run Flyway
|
||||
migrations, and validate the full persistence layer.
|
||||
Never use H2 — not even in tests. -->
|
||||
<dependency>
|
||||
<groupId>org.testcontainers</groupId>
|
||||
<artifactId>postgresql</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.testcontainers</groupId>
|
||||
<artifactId>junit-jupiter</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
|
||||
<!-- HAPI FHIR test utilities — FhirContext in tests -->
|
||||
<dependency>
|
||||
<groupId>ca.uhn.hapi.fhir</groupId>
|
||||
<artifactId>hapi-fhir-test-utilities</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
|
||||
<!-- WireMock — mock OCL and cluster validator in unit tests.
|
||||
Allows testing 422 rejection paths without live OCL. -->
|
||||
<dependency>
|
||||
<groupId>org.wiremock</groupId>
|
||||
<artifactId>wiremock-standalone</artifactId>
|
||||
<version>3.5.4</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
<plugins>
|
||||
|
||||
<!-- Spring Boot Maven plugin — repackages JAR as fat JAR
|
||||
and embeds build-info.properties for /actuator/info.
|
||||
Configured in parent pluginManagement. -->
|
||||
<plugin>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-maven-plugin</artifactId>
|
||||
<!-- Configuration inherited from parent pluginManagement -->
|
||||
</plugin>
|
||||
|
||||
<!-- Compiler plugin — Java 17, inherited from parent -->
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
</plugin>
|
||||
|
||||
<!-- Surefire — JUnit 5, inherited from parent -->
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
</plugin>
|
||||
|
||||
<!-- Resources plugin — ensures packages/ directory with
|
||||
bd.gov.dghs.core-0.2.1.tgz is included in the fat JAR
|
||||
under classpath:packages/ -->
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-resources-plugin</artifactId>
|
||||
<configuration>
|
||||
<resources>
|
||||
<resource>
|
||||
<directory>src/main/resources</directory>
|
||||
<filtering>false</filtering>
|
||||
<!-- filtering=false is critical: the .tgz is binary.
|
||||
Maven resource filtering on binary files corrupts them. -->
|
||||
</resource>
|
||||
</resources>
|
||||
</configuration>
|
||||
</plugin>
|
||||
|
||||
</plugins>
|
||||
|
||||
<!-- Ensure the fat JAR is named predictably for Docker COPY -->
|
||||
<finalName>bd-fhir-hapi</finalName>
|
||||
|
||||
</build>
|
||||
|
||||
</project>
|
||||
55
postgres/audit/init.sql
Normal file
55
postgres/audit/init.sql
Normal file
@@ -0,0 +1,55 @@
|
||||
-- =============================================================================
|
||||
-- postgres/audit/init.sql
|
||||
-- Runs once on first container start (postgres-audit).
|
||||
-- Creates login users for audit_writer and audit_maintainer roles.
|
||||
-- Role privileges are granted by V2 Flyway migration.
|
||||
-- =============================================================================
|
||||
|
||||
-- audit_writer_login: login user that maps to audit_writer role
|
||||
-- Used by HAPI audit datasource. INSERT only on audit schema.
|
||||
CREATE USER audit_writer_login WITH
|
||||
NOSUPERUSER
|
||||
NOCREATEDB
|
||||
NOCREATEROLE
|
||||
NOINHERIT -- does not automatically inherit role privileges
|
||||
LOGIN
|
||||
CONNECTION LIMIT 20 -- hard cap: prevents connection exhaustion
|
||||
PASSWORD 'PLACEHOLDER_REPLACED_BY_ENTRYPOINT';
|
||||
-- NOTE: Actual password is set by the postgres Docker entrypoint
|
||||
-- reading AUDIT_DB_WRITER_PASSWORD from environment. This CREATE USER
|
||||
-- is a template — the entrypoint rewrites the password on init.
|
||||
-- In practice, use the POSTGRES_* env vars pattern and manage user
|
||||
-- creation via an init script that reads env vars:
|
||||
|
||||
-- Grant the audit_writer role to the login user.
-- NOTE(review): the postgres Docker entrypoint runs init scripts only on
-- FIRST container start — i.e. BEFORE Flyway (which runs when HAPI first
-- connects) has created the audit_writer role — so on a fresh database
-- this conditional GRANT is silently skipped. It is idempotent and safe
-- to re-run; re-apply it after the V2 migration (or move the GRANT into
-- the V2 migration itself).
|
||||
DO $$
|
||||
BEGIN
|
||||
IF EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'audit_writer') THEN
|
||||
GRANT audit_writer TO audit_writer_login;
|
||||
END IF;
|
||||
END
|
||||
$$;
|
||||
|
||||
-- audit_maintainer_login: login user for partition maintenance cron job
|
||||
CREATE USER audit_maintainer_login WITH
|
||||
NOSUPERUSER
|
||||
NOCREATEDB
|
||||
NOCREATEROLE
|
||||
NOINHERIT
|
||||
LOGIN
|
||||
CONNECTION LIMIT 5
|
||||
PASSWORD 'PLACEHOLDER_REPLACED_BY_ENTRYPOINT';
|
||||
|
||||
DO $$
|
||||
BEGIN
|
||||
IF EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'audit_maintainer') THEN
|
||||
GRANT audit_maintainer TO audit_maintainer_login;
|
||||
END IF;
|
||||
END
|
||||
$$;
|
||||
|
||||
-- Grant connect on database to both login users
|
||||
GRANT CONNECT ON DATABASE auditdb TO audit_writer_login;
|
||||
GRANT CONNECT ON DATABASE auditdb TO audit_maintainer_login;
|
||||
55
postgres/audit/postgresql.conf
Normal file
55
postgres/audit/postgresql.conf
Normal file
@@ -0,0 +1,55 @@
|
||||
# =============================================================================
|
||||
# postgres/audit/postgresql.conf
|
||||
# PostgreSQL 15 configuration for the audit database.
|
||||
# Container memory limit: 1GB (lighter than FHIR store).
|
||||
# Workload: INSERT-heavy (audit events), occasional SELECT (analytics).
|
||||
#
|
||||
# For 1GB container:
|
||||
# shared_buffers = 256MB
|
||||
# effective_cache_size = 768MB
|
||||
# work_mem = 4MB
|
||||
# maintenance_work_mem = 100MB
|
||||
# =============================================================================
|
||||
|
||||
max_connections = 20
|
||||
superuser_reserved_connections = 3
|
||||
|
||||
shared_buffers = 256MB
|
||||
effective_cache_size = 768MB
|
||||
work_mem = 4MB
|
||||
maintenance_work_mem = 100MB
|
||||
|
||||
wal_buffers = 8MB
|
||||
checkpoint_completion_target = 0.9
|
||||
synchronous_commit = on
|
||||
|
||||
random_page_cost = 1.1
|
||||
effective_io_concurrency = 200
|
||||
|
||||
# Logging
|
||||
log_destination = stderr
|
||||
logging_collector = off
|
||||
log_min_messages = WARNING
|
||||
log_min_error_statement = ERROR
|
||||
log_min_duration_statement = 500
|
||||
log_line_prefix = '%t [%p] %u@%d '
|
||||
log_checkpoints = on
|
||||
log_lock_waits = on
|
||||
log_temp_files = 0
|
||||
|
||||
# Autovacuum — partitioned tables need careful autovacuum tuning.
|
||||
# Each monthly partition is a separate physical table for autovacuum purposes.
|
||||
autovacuum = on
|
||||
autovacuum_max_workers = 3
|
||||
autovacuum_naptime = 60s
|
||||
|
||||
timezone = 'UTC'
|
||||
log_timezone = 'UTC'
|
||||
|
||||
lc_messages = 'en_US.UTF-8'
|
||||
lc_monetary = 'en_US.UTF-8'
|
||||
lc_numeric = 'en_US.UTF-8'
|
||||
lc_time = 'en_US.UTF-8'
|
||||
|
||||
track_io_timing = on
|
||||
track_counts = on
|
||||
38
postgres/fhir/init.sql
Normal file
38
postgres/fhir/init.sql
Normal file
@@ -0,0 +1,38 @@
|
||||
-- =============================================================================
|
||||
-- postgres/fhir/init.sql
|
||||
-- Runs once on first container start (postgres-fhir).
|
||||
-- Creates the application user that HAPI uses at runtime.
|
||||
-- Flyway migrations run as superuser separately.
|
||||
-- =============================================================================
|
||||
|
||||
-- Application user — read/write to HAPI JPA tables
|
||||
-- Password injected from FHIR_DB_APP_PASSWORD environment variable
|
||||
-- via docker-compose. The \getenv syntax requires psql — use DO block instead.
|
||||
DO $$
|
||||
BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = current_setting('app.db_user', true)) THEN
|
||||
-- User created by the entrypoint using POSTGRES_* env vars equivalent.
|
||||
-- This script creates it explicitly for auditability.
|
||||
NULL;
|
||||
END IF;
|
||||
END
|
||||
$$;
|
||||
|
||||
-- Create app user. Password set via environment variable substitution
|
||||
-- in the Docker entrypoint. The actual CREATE USER is handled by
|
||||
-- the entrypoint script reading FHIR_DB_APP_USER/PASSWORD env vars.
|
||||
-- This script grants the necessary privileges after user creation.
|
||||
|
||||
-- Grant connect
|
||||
GRANT CONNECT ON DATABASE fhirdb TO hapi_app;
|
||||
|
||||
-- Grant schema usage and object privileges
|
||||
-- Flyway creates all tables as superuser; we then grant hapi_app access.
|
||||
-- These grants run after Flyway migrations on first startup via Spring Boot
|
||||
-- ApplicationListener — see DataSourceConfig.java.
|
||||
-- Pre-grant public schema access:
|
||||
GRANT USAGE ON SCHEMA public TO hapi_app;
|
||||
ALTER DEFAULT PRIVILEGES IN SCHEMA public
|
||||
GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO hapi_app;
|
||||
ALTER DEFAULT PRIVILEGES IN SCHEMA public
|
||||
GRANT USAGE, SELECT ON SEQUENCES TO hapi_app;
|
||||
81
postgres/fhir/postgresql.conf
Normal file
81
postgres/fhir/postgresql.conf
Normal file
@@ -0,0 +1,81 @@
|
||||
# =============================================================================
|
||||
# postgres/fhir/postgresql.conf
|
||||
# PostgreSQL 15 configuration tuned for HAPI FHIR JPA workload.
|
||||
# Container memory limit: 2GB (set in docker-compose deploy.resources).
|
||||
#
|
||||
# Tuning methodology:
|
||||
# shared_buffers = 25% of container RAM
|
||||
# effective_cache = 75% of container RAM
|
||||
# work_mem = (RAM - shared_buffers) / (max_connections * 2)
|
||||
# maintenance_work_mem = 10% of RAM
|
||||
#
|
||||
# For 2GB container:
|
||||
# shared_buffers = 512MB
|
||||
# effective_cache_size = 1536MB
|
||||
# work_mem = 8MB (conservative — many parallel queries)
|
||||
# maintenance_work_mem = 200MB
|
||||
# =============================================================================
|
||||
|
||||
# Connection settings
|
||||
# max_connections must be > pgBouncer pool_size to leave headroom for
|
||||
# superuser connections (Flyway, maintenance).
|
||||
# pgBouncer pool_size=20 + 5 superuser = 25 total.
|
||||
max_connections = 30
|
||||
superuser_reserved_connections = 3
|
||||
|
||||
# Memory
|
||||
shared_buffers = 512MB
|
||||
effective_cache_size = 1536MB
|
||||
work_mem = 8MB
|
||||
maintenance_work_mem = 200MB
|
||||
|
||||
# Write performance
|
||||
# wal_buffers: 16MB is good for write-heavy workloads
|
||||
wal_buffers = 16MB
|
||||
checkpoint_completion_target = 0.9
|
||||
# synchronous_commit=on: do not disable — data integrity is non-negotiable
|
||||
# for a national health record system.
|
||||
synchronous_commit = on
|
||||
|
||||
# Query planner
|
||||
# random_page_cost=1.1: appropriate for SSD storage (not spinning disk).
|
||||
# If storage is HDD, set to 4.0.
|
||||
random_page_cost = 1.1
|
||||
effective_io_concurrency = 200
|
||||
|
||||
# Logging — errors and slow queries only
|
||||
# log_min_duration_statement: log queries taking >500ms.
|
||||
# Adjust down to 100ms if you want more visibility during initial deployment.
|
||||
log_destination = stderr
|
||||
logging_collector = off # Docker captures stderr directly
|
||||
log_min_messages = WARNING
|
||||
log_min_error_statement = ERROR
|
||||
log_min_duration_statement = 500
|
||||
log_line_prefix = '%t [%p] %u@%d '
|
||||
log_checkpoints = on
|
||||
log_connections = off # pgBouncer already logs connections
|
||||
log_disconnections = off
|
||||
log_lock_waits = on
|
||||
log_temp_files = 0
|
||||
|
||||
# Autovacuum — keep defaults but tune for HAPI's high-write token tables
|
||||
autovacuum = on
|
||||
autovacuum_max_workers = 3
|
||||
autovacuum_naptime = 30s
|
||||
# HFJ_SPIDX_TOKEN is written heavily — lower threshold for autovacuum
|
||||
autovacuum_vacuum_scale_factor = 0.02
|
||||
autovacuum_analyze_scale_factor = 0.01
|
||||
|
||||
# Timezone — all timestamps in UTC
|
||||
timezone = 'UTC'
|
||||
log_timezone = 'UTC'
|
||||
|
||||
# Locale
|
||||
lc_messages = 'en_US.UTF-8'
|
||||
lc_monetary = 'en_US.UTF-8'
|
||||
lc_numeric = 'en_US.UTF-8'
|
||||
lc_time = 'en_US.UTF-8'
|
||||
|
||||
# Statistics
|
||||
track_io_timing = on
|
||||
track_counts = on
|
||||
Reference in New Issue
Block a user