first commit

This commit is contained in:
2026-03-16 00:02:58 +06:00
commit c11f0bd5bc
36 changed files with 11938 additions and 0 deletions

206
hapi-overlay/Dockerfile Normal file
View File

@@ -0,0 +1,206 @@
# =============================================================================
# BD FHIR National — HAPI Overlay Dockerfile
# Multi-stage build: Maven builder + lean JRE runtime
#
# BUILD (CI machine):
# docker build \
# --build-arg IG_PACKAGE=bd.gov.dghs.core-0.2.1.tgz \
# --build-arg BUILD_VERSION=1.0.0 \
# --build-arg GIT_COMMIT=$(git rev-parse --short HEAD) \
# -t your-registry.dghs.gov.bd/bd-fhir-hapi:1.0.0 \
# -f hapi-overlay/Dockerfile \
# .
#
# PUSH:
# docker push your-registry.dghs.gov.bd/bd-fhir-hapi:1.0.0
#
# The production server never builds — it only pulls.
# The IG package.tgz must be present at:
# hapi-overlay/src/main/resources/packages/${IG_PACKAGE}
# before the build runs. CI pipeline is responsible for placing it there.
#
# IG VERSION UPGRADE:
# 1. Drop new package.tgz into src/main/resources/packages/
# 2. Update IG_PACKAGE build arg to new filename
# 3. Rebuild and push new image tag
# 4. Redeploy via docker-compose pull + up
# 5. Call cache flush endpoint (see ops/version-upgrade-integration.md)
# =============================================================================
# -----------------------------------------------------------------------------
# STAGE 1: Builder
# Uses full Maven + JDK image. Result discarded after JAR is built.
# Only the fat JAR is carried forward to the runtime stage.
# -----------------------------------------------------------------------------
FROM maven:3.9.6-eclipse-temurin-17 AS builder
LABEL stage=builder
WORKDIR /build
# Copy parent POM first — allows Docker layer caching to skip dependency
# download if only source code changes (not POM dependencies).
COPY pom.xml ./pom.xml
COPY hapi-overlay/pom.xml ./hapi-overlay/pom.xml
# Download all dependencies into the Maven local repository cache layer.
# This layer is invalidated only when a POM file changes.
# On a CI machine with layer caching enabled, this saves 3-5 minutes
# per build when only Java source changes.
# NOTE(review): dependency:go-offline is best-effort — some plugin
# artifacts are only resolved during the real build. Harmless here since
# `mvn package` below runs in the same stage with network access; it only
# slightly weakens the caching benefit. Confirm if truly-offline builds
# are ever required.
RUN mvn dependency:go-offline \
    --batch-mode \
    --no-transfer-progress \
    -pl hapi-overlay \
    -am
# Now copy source — this layer changes on every code commit.
# (Keep .dockerignore tight: anything churning under src/ — logs, IDE
# files — needlessly invalidates this and every following layer.)
COPY hapi-overlay/src ./hapi-overlay/src
# Build fat JAR. Skip tests here — tests run in a separate CI stage
# against TestContainers before the Docker build is invoked.
# If your CI runs tests inside Docker, remove -DskipTests.
RUN mvn package \
    --batch-mode \
    --no-transfer-progress \
    -pl hapi-overlay \
    -am \
    -DskipTests \
    -Dspring-boot.repackage.skip=false
# Verify the fat JAR was produced with the expected name — fails the build
# early (in this stage) if <finalName> in the POM ever drifts from the
# path the runtime stage COPYs.
RUN ls -lh /build/hapi-overlay/target/bd-fhir-hapi.jar && \
    echo "JAR size: $(du -sh /build/hapi-overlay/target/bd-fhir-hapi.jar | cut -f1)"
# -----------------------------------------------------------------------------
# STAGE 2: Runtime
# Lean JRE image — no JDK, no Maven, no build tools.
# Attack surface reduced. Image size ~300MB vs ~800MB for builder.
# -----------------------------------------------------------------------------
FROM eclipse-temurin:17-jre-jammy AS runtime
# Build arguments — embedded in image labels for traceability.
# Every production image must be traceable to a specific git commit
# and IG version. If you cannot answer "what IG version is running",
# you cannot validate your validation engine.
ARG BUILD_VERSION=unknown
ARG GIT_COMMIT=unknown
ARG IG_PACKAGE=unknown
# RFC 3339 build time, supplied by CI:
#   --build-arg BUILD_TIMESTAMP=$(date -u +%Y-%m-%dT%H:%M:%SZ)
# FIX: the previous `RUN if [ -z "${BUILD_TIMESTAMP}" ] ...` was a no-op —
# a shell variable assigned inside RUN is discarded when the layer ends,
# and the value was never used anywhere. Default the ARG here instead and
# record it in the standard OCI "created" label so the timestamp is
# actually embedded in the image.
ARG BUILD_TIMESTAMP=unknown
LABEL org.opencontainers.image.title="BD FHIR National HAPI Server" \
      org.opencontainers.image.description="National FHIR R4 repository and validation engine, Bangladesh" \
      org.opencontainers.image.vendor="DGHS/MoHFW Bangladesh" \
      org.opencontainers.image.version="${BUILD_VERSION}" \
      org.opencontainers.image.revision="${GIT_COMMIT}" \
      org.opencontainers.image.created="${BUILD_TIMESTAMP}" \
      bd.gov.dghs.ig.version="${IG_PACKAGE}" \
      bd.gov.dghs.fhir.version="R4" \
      bd.gov.dghs.hapi.version="7.2.0"
# -----------------------------------------------------------------------------
# SYSTEM SETUP
# -----------------------------------------------------------------------------
# Create non-root user. Running as root inside a container is a security
# vulnerability — if the JVM is exploited, the attacker gets root on the host
# if the container runs privileged or has volume mounts.
# The fixed numeric UID/GID (10001) lets orchestrators (e.g. Kubernetes
# runAsNonRoot) verify the user identity without reading /etc/passwd.
RUN groupadd --gid 10001 hapi && \
    useradd --uid 10001 --gid hapi --shell /bin/false --no-create-home hapi
# Install curl for Docker health checks.
# tini: init process to reap zombie processes and forward signals correctly.
# Without tini, SIGTERM from docker stop is not forwarded to the JVM and
# the container is killed after the stop timeout (ungraceful shutdown).
# NOTE(review): curl/tini versions are unpinned (hadolint DL3008) — rebuilds
# may silently pick up newer packages. Acceptable if base-image digest
# pinning is the chosen reproducibility lever; confirm against CI policy.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        curl \
        tini \
    && rm -rf /var/lib/apt/lists/*
# Application directory (WORKDIR creates it if missing)
WORKDIR /app
# -----------------------------------------------------------------------------
# COPY ARTIFACTS FROM BUILDER
# -----------------------------------------------------------------------------
# FIX: --chown sets ownership in the same layer as the copy. The previous
# COPY + separate `RUN chown` pair stored the fat JAR twice — the chown
# rewrites the whole file into a new layer — roughly doubling the image
# cost of this artifact. The hapi user still gets read access to the JAR.
COPY --from=builder --chown=hapi:hapi /build/hapi-overlay/target/bd-fhir-hapi.jar /app/bd-fhir-hapi.jar
# -----------------------------------------------------------------------------
# RUNTIME CONFIGURATION
# -----------------------------------------------------------------------------
# Switch to non-root user before any further commands
USER hapi
# JVM tuning arguments.
# These are defaults — override via JAVA_OPTS environment variable
# in docker-compose.yml for environment-specific tuning.
#
# -XX:+UseContainerSupport
#   Enables JVM to read CPU/memory limits from cgroup (Docker constraints).
#   Without this, JVM reads host machine memory and over-allocates heap.
#   Available since Java 8u191 — always present (and enabled by default)
#   in temurin:17; kept explicit as documentation.
#
# -XX:MaxRAMPercentage=75.0
#   Heap = 75% of container memory limit.
#   For a 2GB container: heap = 1.5GB. Remaining 512MB for non-heap
#   (Metaspace, thread stacks, code cache, direct buffers).
#   HAPI 7.x with full IG loaded needs ~512MB heap minimum.
#   Recommended container memory: 2GB minimum, 4GB for production.
#
# -XX:+ExitOnOutOfMemoryError
#   Kill the JVM immediately on OOM instead of limping along in a broken
#   state. Docker will restart the container. Prefer clean restart over
#   degraded service.
#
# -XX:HeapDumpPath=/tmp/heapdump.hprof
#   /tmp is writable by the non-root hapi user; mount a volume there if
#   heap dumps must survive container restarts.
#
# -Djava.security.egd=file:/dev/urandom
#   Prevents SecureRandom from blocking on /dev/random in containerised
#   environments where hardware entropy is limited.
#   Critical for JWT validation performance — Nimbus JOSE uses SecureRandom.
ENV JAVA_OPTS="\
-XX:+UseContainerSupport \
-XX:MaxRAMPercentage=75.0 \
-XX:+ExitOnOutOfMemoryError \
-XX:+HeapDumpOnOutOfMemoryError \
-XX:HeapDumpPath=/tmp/heapdump.hprof \
-Djava.security.egd=file:/dev/urandom \
-Dfile.encoding=UTF-8 \
-Duser.timezone=UTC"
# Spring profile — overridable via environment variable in docker-compose
ENV SPRING_PROFILES_ACTIVE=prod
# FHIR server base URL — must match nginx configuration
ENV HAPI_FHIR_SERVER_ADDRESS=https://fhir.dghs.gov.bd/fhir
# Expose HTTP port. nginx terminates TLS and proxies to this port.
# Do NOT expose this port directly — it must only be reachable via nginx.
# (EXPOSE is documentation only; it does not publish the port.)
EXPOSE 8080
# Health check — used by Docker and docker-compose depends_on condition.
# /actuator/health returns 200 when application is fully started and
# all health indicators pass (including the custom AuditDataSourceHealthIndicator).
# --fail: exit non-zero on HTTP error responses. (A previous comment said
#   --fail-with-body, but the flag actually used is --fail — the body is
#   irrelevant here because Docker only consumes the exit code.)
# start_period: allow 120s for startup (IG loading + Flyway migrations).
HEALTHCHECK \
    --interval=30s \
    --timeout=10s \
    --start-period=120s \
    --retries=3 \
    CMD curl --fail --silent --show-error \
        http://localhost:8080/actuator/health/liveness || exit 1
# -----------------------------------------------------------------------------
# ENTRYPOINT
# tini as PID 1 → JVM as child process.
# tini handles SIGTERM correctly: forwards to JVM, waits for graceful
# shutdown, then exits. Without tini, docker stop sends SIGTERM to PID 1
# (the JVM) but the JVM may ignore it depending on signal handling setup.
# -----------------------------------------------------------------------------
ENTRYPOINT ["/usr/bin/tini", "--"]
# Shell form CMD is deliberate: ${JAVA_OPTS} must be expanded by a shell,
# and `exec` replaces that shell so the JVM becomes tini's direct child
# and receives forwarded signals.
CMD ["sh", "-c", "exec java ${JAVA_OPTS} -jar /app/bd-fhir-hapi.jar"]

318
hapi-overlay/pom.xml Normal file
View File

@@ -0,0 +1,318 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<!-- =========================================================
BD Core FHIR National Repository — HAPI Overlay Module
This module produces the fat JAR that runs in the container.
All runtime dependencies declared here.
========================================================= -->
<parent>
<groupId>bd.gov.dghs</groupId>
<artifactId>bd-fhir-national</artifactId>
<version>1.0.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<artifactId>hapi-overlay</artifactId>
<packaging>jar</packaging>
<name>BD FHIR National — HAPI Overlay</name>
<description>
Custom HAPI FHIR overlay for the BD national FHIR repository.
Includes: Keycloak JWT interceptor, BD Core IG validation chain,
OCL terminology integration, cluster expression validator,
audit event emitter, and rejected submission sink.
</description>
<dependencies>
<!-- =======================================================
HAPI FHIR CORE — JPA server stack
Versions managed by hapi-fhir-bom in parent POM.
======================================================= -->
<!-- JPA server starter — brings in Spring Boot web, JPA,
Hibernate, Jackson, and HAPI servlet infrastructure -->
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-jpaserver-starter</artifactId>
<!-- Version from BOM. Do NOT pin version here. -->
</dependency>
<!-- FHIR R4 model classes — Patient, Condition, AuditEvent, etc. -->
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-structures-r4</artifactId>
</dependency>
<!-- Validation support framework — IValidationSupport chain -->
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-validation</artifactId>
</dependency>
<!-- Validation resources — built-in FHIR R4 profiles and
code system content (LOINC, SNOMED stubs, etc.) -->
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-validation-resources-r4</artifactId>
</dependency>
<!-- NPM package support — loads BD Core IG package.tgz
via NpmPackageValidationSupport -->
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-npm-packages</artifactId>
</dependency>
<!-- Remote terminology service — base class for our custom
BdTerminologyValidationSupport. We extend this to force
$validate-code and suppress $expand. -->
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-terminology</artifactId>
</dependency>
<!-- FIX: a second declaration of hapi-fhir-validation-resources-r4
     (commented as "IInstanceValidator") previously appeared here. It
     duplicated the declaration above ("Validation resources ...");
     Maven emits a duplicate-dependency warning and ignores the second
     entry, so removing it changes nothing on the classpath — the single
     declaration already provides the R4 validation resources. -->
<!-- =======================================================
SPRING BOOT STARTERS
Versions managed by spring-boot-dependencies in parent.
======================================================= -->
<!-- Web MVC — embedded Tomcat, DispatcherServlet -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<!-- JPA / Hibernate — HAPI JPA persistence layer -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-jpa</artifactId>
</dependency>
<!-- Actuator — /actuator/health, /actuator/info, /actuator/metrics.
Health endpoints used by load balancer liveness/readiness probes.
Custom AuditDataSourceHealthIndicator registered here. -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-actuator</artifactId>
</dependency>
<!-- Validation (Bean Validation / Hibernate Validator) —
used for @Valid on REST controller inputs -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-validation</artifactId>
</dependency>
<!-- =======================================================
DATABASE
======================================================= -->
<!-- PostgreSQL JDBC driver — runtime only, not needed at compile -->
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<scope>runtime</scope>
</dependency>
<!-- Flyway core — schema migration engine.
Runs V1__hapi_schema.sql and V2__audit_schema.sql on startup
before HAPI JPA initialises. -->
<dependency>
<groupId>org.flywaydb</groupId>
<artifactId>flyway-core</artifactId>
</dependency>
<!-- Flyway PostgreSQL dialect — required for Flyway 10+.
Without this artifact, Flyway silently skips migrations
against PostgreSQL datasources. -->
<dependency>
<groupId>org.flywaydb</groupId>
<artifactId>flyway-database-postgresql</artifactId>
</dependency>
<!-- HikariCP — connection pool.
Spring Boot auto-configures HikariCP when it is on classpath.
Explicit declaration ensures version alignment with parent BOM. -->
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
</dependency>
<!-- =======================================================
SECURITY — JWT VALIDATION
======================================================= -->
<!-- Nimbus JOSE+JWT — JWT parsing, signature verification,
and JWKS remote key set with cache.
Used by KeycloakJwtInterceptor.
RemoteJWKSet provides kid-based cache invalidation:
keys cached 1 hour, re-fetched on unknown kid. -->
<dependency>
<groupId>com.nimbusds</groupId>
<artifactId>nimbus-jose-jwt</artifactId>
</dependency>
<!-- =======================================================
HTTP CLIENT — OCL and cluster validator calls
======================================================= -->
<!-- Apache HttpClient 5 — used by BdTerminologyValidationSupport
for OCL $validate-code calls, and ClusterExpressionValidator
for https://icd11.dghs.gov.bd/cluster/validate calls.
Separate from the HttpClient that HAPI uses internally
(HAPI uses its own managed instance). -->
<dependency>
<groupId>org.apache.httpcomponents.client5</groupId>
<artifactId>httpclient5</artifactId>
</dependency>
<!-- =======================================================
OBSERVABILITY
======================================================= -->
<!-- Micrometer Prometheus registry — exposes /actuator/prometheus
for Prometheus scraping. Optional but included from day one
for national-scale observability readiness. -->
<dependency>
<groupId>io.micrometer</groupId>
<artifactId>micrometer-registry-prometheus</artifactId>
</dependency>
<!-- =======================================================
UTILITIES
======================================================= -->
<!-- Jackson — JSON serialisation for audit log payloads,
OCL API responses, cluster validator responses.
Managed by Spring Boot BOM. -->
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.datatype</groupId>
<artifactId>jackson-datatype-jsr310</artifactId>
</dependency>
<!-- SLF4J / Logback — Spring Boot default logging.
Logback configured in application.yaml for structured JSON
output suitable for ELK ingestion. -->
<dependency>
<groupId>net.logstash.logback</groupId>
<artifactId>logstash-logback-encoder</artifactId>
<version>7.4</version>
</dependency>
<!-- =======================================================
TEST DEPENDENCIES
======================================================= -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
<!-- Excludes vintage JUnit 4 engine — JUnit 5 only -->
<exclusions>
<exclusion>
<groupId>org.junit.vintage</groupId>
<artifactId>junit-vintage-engine</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- TestContainers — PostgreSQL container for integration tests.
Tests spin up a real PostgreSQL 15 container, run Flyway
migrations, and validate the full persistence layer.
Never use H2 — not even in tests. -->
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>postgresql</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>junit-jupiter</artifactId>
<scope>test</scope>
</dependency>
<!-- HAPI FHIR test utilities — FhirContext in tests -->
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-test-utilities</artifactId>
<scope>test</scope>
</dependency>
<!-- WireMock — mock OCL and cluster validator in unit tests.
Allows testing 422 rejection paths without live OCL. -->
<dependency>
<groupId>org.wiremock</groupId>
<artifactId>wiremock-standalone</artifactId>
<version>3.5.4</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<!-- Spring Boot Maven plugin — repackages JAR as fat JAR
and embeds build-info.properties for /actuator/info.
Configured in parent pluginManagement. -->
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<!-- Configuration inherited from parent pluginManagement -->
</plugin>
<!-- Compiler plugin — Java 17, inherited from parent -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
</plugin>
<!-- Surefire — JUnit 5, inherited from parent -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
</plugin>
<!-- Resources plugin — ensures packages/ directory with
bd.gov.dghs.core-0.2.1.tgz is included in the fat JAR
under classpath:packages/ -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-resources-plugin</artifactId>
<configuration>
<resources>
<resource>
<directory>src/main/resources</directory>
<filtering>false</filtering>
<!-- filtering=false is critical: the .tgz is binary.
Maven resource filtering on binary files corrupts them. -->
</resource>
</resources>
</configuration>
</plugin>
</plugins>
<!-- Ensure the fat JAR is named predictably for Docker COPY -->
<finalName>bd-fhir-hapi</finalName>
</build>
</project>

View File

@@ -0,0 +1,30 @@
package bd.gov.dghs.fhir;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.scheduling.annotation.EnableAsync;
/**
* BD FHIR National Repository — Spring Boot application entry point.
*
* <p>This is the main class that bootstraps the Spring application context.
* All configuration is handled by the {@code @Configuration} classes in
* {@code bd.gov.dghs.fhir.config}. This class intentionally contains no
* configuration logic — it is a pure entry point.
*
* <p>{@code @EnableAsync} activates Spring's async task executor, which is
* required by {@link bd.gov.dghs.fhir.audit.AuditEventEmitter} and
* {@link bd.gov.dghs.fhir.audit.RejectedSubmissionSink} for non-blocking
* audit writes. The executor pool is configured in {@code application.yaml}
* under {@code spring.task.execution}.
*/
@SpringBootApplication(scanBasePackages = "bd.gov.dghs.fhir")
@EnableAsync
public class BdFhirApplication {

    /**
     * JVM entry point — delegates straight to Spring Boot with no extra
     * configuration logic (all wiring lives in the config package).
     *
     * @param args command-line arguments, forwarded unmodified to Spring
     */
    public static void main(String[] args) {
        SpringApplication.run(BdFhirApplication.class, args);
    }
}

View File

@@ -0,0 +1,203 @@
package bd.gov.dghs.fhir.audit;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;
import java.time.Instant;
import java.util.List;
import java.util.UUID;
/**
* Emits audit event records to {@code audit.audit_events} asynchronously.
*
* <h2>Immutability enforcement</h2>
* <p>This class uses INSERT only — no UPDATE or DELETE methods exist.
* The {@code audit_writer_login} PostgreSQL user has INSERT-only privileges
* on the audit schema — any UPDATE or DELETE at the JDBC level would fail
* with a PostgreSQL permission error regardless of what this class attempts.
*
* <h2>Async execution</h2>
* <p>{@code @Async} causes Spring to execute {@link #emitAsync} on the
* {@code audit-async-} thread pool (configured in application.yaml
* {@code spring.task.execution}). The calling FHIR request thread returns
* immediately after submitting the task — audit writes do not add to
* vendor-visible response latency.
*
* <p>If the async queue is full (500 entries by default), the task executor
* blocks the submitting thread until space is available. At expected pilot
* load (<10,000 resources/day = ~0.1/s average), the queue should never fill.
* The queue depth provides burst absorption for sudden load spikes.
*
* <h2>Failure handling</h2>
* <p>If the INSERT fails (postgres-audit unavailable, constraint violation, etc.),
* the failure is logged at ERROR level. The FHIR operation has already completed
* successfully at this point — the vendor has received their 201 or 422.
* Audit write failure does not affect the FHIR response.
*/
@Component
public class AuditEventEmitter {

    private static final Logger log = LoggerFactory.getLogger(AuditEventEmitter.class);

    // Append-only by design: this INSERT is the only statement this class
    // ever issues. The audit_writer_login DB role is INSERT-only, so
    // UPDATE/DELETE would be refused by PostgreSQL in any case.
    private static final String INSERT_EVENT_SQL = """
            INSERT INTO audit.audit_events (
                event_id, event_time, event_type, operation,
                resource_type, resource_id, resource_version,
                outcome, outcome_detail,
                sending_facility, client_id, subject,
                request_ip, request_id, validation_messages
            ) VALUES (
                ?, ?, ?, ?,
                ?, ?, ?,
                ?, ?,
                ?, ?, ?,
                ?, ?, ?::jsonb
            )
            """;

    private final JdbcTemplate auditJdbcTemplate;
    private final ObjectMapper objectMapper;

    /**
     * @param auditJdbcTemplate template qualified with "auditJdbcTemplate" —
     *                          bound to the audit datasource, not the HAPI one
     */
    public AuditEventEmitter(
            @Qualifier("auditJdbcTemplate") JdbcTemplate auditJdbcTemplate) {
        this.auditJdbcTemplate = auditJdbcTemplate;
        // JavaTimeModule so java.time values serialise cleanly if they ever
        // appear inside validation message payloads.
        this.objectMapper = new ObjectMapper().registerModule(new JavaTimeModule());
    }

    /**
     * Emits a single audit event record asynchronously.
     *
     * <p>Runs on the async task executor; the calling FHIR request thread
     * returns as soon as the task is queued. Any failure is logged at ERROR
     * and swallowed — the FHIR response has already been sent by then.
     */
    @Async("taskExecutor")
    public void emitAsync(AuditRecord record) {
        try {
            String messagesJson = null;
            List<String> messages = record.validationMessages();
            if (messages != null && !messages.isEmpty()) {
                messagesJson = objectMapper.writeValueAsString(messages);
            }
            Object[] params = {
                    record.eventId(),
                    // event_time must be a java.sql.Timestamp for the
                    // JDBC → timestamptz mapping
                    java.sql.Timestamp.from(record.eventTime()),
                    record.eventType(),
                    record.operation(),
                    record.resourceType(),
                    record.resourceId(),
                    record.resourceVersion(),
                    record.outcome(),
                    truncate(record.outcomeDetail(), 2000),
                    truncate(record.sendingFacility(), 200),
                    truncate(record.clientId(), 200),
                    truncate(record.subject(), 200),
                    truncate(record.requestIp(), 45),
                    truncate(record.requestId(), 36),
                    messagesJson
            };
            auditJdbcTemplate.update(INSERT_EVENT_SQL, params);
            log.debug("Audit event emitted: eventId={} outcome={} clientId={}",
                    record.eventId(), record.outcome(), record.clientId());
        } catch (Exception e) {
            // Log at ERROR — an audit gap is a serious operational issue,
            // but it must never affect the already-sent FHIR response.
            log.error("AUDIT WRITE FAILED — event lost: eventId={} outcome={} " +
                            "clientId={} resourceType={} error={}",
                    record.eventId(), record.outcome(),
                    record.clientId(), record.resourceType(), e.getMessage(), e);
        }
    }

    // =========================================================================
    // Helpers
    // =========================================================================

    /** Null-safe truncation of {@code value} to at most {@code maxLength} chars. */
    private String truncate(String value, int maxLength) {
        if (value == null || value.length() <= maxLength) {
            return value;
        }
        return value.substring(0, maxLength);
    }

    // =========================================================================
    // AuditRecord — immutable value object built by AuditEventInterceptor
    // =========================================================================

    /**
     * Immutable audit record. Built by
     * {@link bd.gov.dghs.fhir.interceptor.AuditEventInterceptor} and passed
     * to {@link #emitAsync}. A builder is provided because most event types
     * only populate a subset of fields (e.g. resourceId is null for auth
     * failures).
     */
    public record AuditRecord(
            UUID eventId,
            Instant eventTime,
            String eventType,   // OPERATION | AUTH_FAILURE | VALIDATION_FAILURE | TERMINOLOGY_FAILURE
            String operation,   // CREATE | UPDATE | DELETE | READ | PATCH
            String resourceType,
            String resourceId,
            Long resourceVersion,
            String outcome,     // ACCEPTED | REJECTED
            String outcomeDetail,
            String sendingFacility,
            String clientId,
            String subject,
            String requestIp,
            String requestId,
            List<String> validationMessages
    ) {
        public static Builder builder() { return new Builder(); }

        /** Mutable builder; {@link #build()} fills defaults for missing fields. */
        public static final class Builder {
            private UUID eventId;
            private Instant eventTime;
            private String eventType;
            private String operation;
            private String resourceType;
            private String resourceId;
            private Long resourceVersion;
            private String outcome;
            private String outcomeDetail;
            private String sendingFacility;
            private String clientId;
            private String subject;
            private String requestIp;
            private String requestId;
            private List<String> validationMessages;

            public Builder eventId(UUID v) { this.eventId = v; return this; }
            public Builder eventTime(Instant v) { this.eventTime = v; return this; }
            public Builder eventType(String v) { this.eventType = v; return this; }
            public Builder operation(String v) { this.operation = v; return this; }
            public Builder resourceType(String v) { this.resourceType = v; return this; }
            public Builder resourceId(String v) { this.resourceId = v; return this; }
            public Builder resourceVersion(Long v) { this.resourceVersion = v; return this; }
            public Builder outcome(String v) { this.outcome = v; return this; }
            public Builder outcomeDetail(String v) { this.outcomeDetail = v; return this; }
            public Builder sendingFacility(String v) { this.sendingFacility = v; return this; }
            public Builder clientId(String v) { this.clientId = v; return this; }
            public Builder subject(String v) { this.subject = v; return this; }
            public Builder requestIp(String v) { this.requestIp = v; return this; }
            public Builder requestId(String v) { this.requestId = v; return this; }
            public Builder validationMessages(List<String> v) { this.validationMessages = v; return this; }

            /** Defaults: random eventId, current eventTime, "unknown" client/subject. */
            public AuditRecord build() {
                UUID id = eventId != null ? eventId : UUID.randomUUID();
                Instant time = eventTime != null ? eventTime : Instant.now();
                String client = clientId != null ? clientId : "unknown";
                String subj = subject != null ? subject : "unknown";
                return new AuditRecord(
                        id, time, eventType, operation, resourceType, resourceId,
                        resourceVersion, outcome, outcomeDetail, sendingFacility,
                        client, subj, requestIp, requestId, validationMessages);
            }
        }
    }
}

View File

@@ -0,0 +1,218 @@
package bd.gov.dghs.fhir.audit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;
import java.time.Instant;
import java.util.UUID;
/**
* Stores rejected FHIR resource payloads to {@code audit.fhir_rejected_submissions}.
*
* <h2>Purpose</h2>
* <p>Rejected resources are never stored in the main FHIR repository. Instead,
* the full submitted payload is written here with:
* <ul>
* <li>The exact JSON as submitted by the vendor (byte-for-byte, in TEXT column)</li>
* <li>A machine-readable rejection code (from the {@code rejection_code} CHECK constraint)</li>
* <li>A human-readable rejection reason</li>
* <li>The FHIRPath expression of the violating element</li>
* <li>The violated profile URL (for profile violations)</li>
* <li>The invalid code and system (for terminology violations)</li>
* <li>The sending facility and client_id</li>
* </ul>
*
* <h2>Forensic use cases</h2>
* <ul>
* <li>Vendor debugging: vendor submits a Condition, gets 422, asks DGHS why.
* DGHS queries by client_id + submission_time to retrieve the exact payload
* and rejection reason.</li>
* <li>Dispute resolution: vendor claims they submitted a valid resource.
* DGHS retrieves the original payload to verify.</li>
* <li>Quality monitoring: DGHS analyses rejection patterns by facility or
* rejection code to identify systemic data quality issues.</li>
* <li>IG development: common rejection codes indicate constraints that may
* be too strict or IG profiles that need clarification.</li>
* </ul>
*
* <h2>Payload storage format</h2>
* <p>Payload is stored as TEXT (not JSONB) to preserve the exact bytes as
* submitted. JSONB parsing at INSERT time would normalise whitespace and key
* ordering, obscuring potential encoding or formatting issues in vendor submissions.
* It would also reject malformed JSON payloads — but a malformed JSON payload
* is itself a valid rejection scenario that needs to be captured.
*
* <h2>Retention policy</h2>
* <p>No automatic deletion. The partitioned table structure allows old partitions
* to be DROPped when retention policy dictates (e.g., drop partitions older than
* 7 years per health record retention law). DROP PARTITION is a metadata-only
* operation — instant and non-blocking unlike DELETE on unpartitioned tables.
* Retention management is a DBA responsibility, not an application responsibility.
*/
@Component
public class RejectedSubmissionSink {
private static final Logger log = LoggerFactory.getLogger(RejectedSubmissionSink.class);
/**
* Maximum payload size stored in the rejected submissions table.
* FHIR resources should not exceed 1MB in practice, but malformed or
* adversarial payloads could be arbitrarily large. Cap at 4MB to prevent
* the audit table from becoming a vector for storage exhaustion.
*/
private static final int MAX_PAYLOAD_BYTES = 4 * 1024 * 1024;
// Template bound to the dedicated audit datasource (bean "auditJdbcTemplate"),
// not the HAPI JPA datasource.
private final JdbcTemplate auditJdbcTemplate;

/**
 * @param auditJdbcTemplate JdbcTemplate qualified with "auditJdbcTemplate" —
 *                          writes go to the audit schema via the
 *                          INSERT-only audit writer connection.
 */
public RejectedSubmissionSink(
@Qualifier("auditJdbcTemplate") JdbcTemplate auditJdbcTemplate) {
this.auditJdbcTemplate = auditJdbcTemplate;
}
/**
 * Stores a rejected submission asynchronously.
 *
 * <p>Executes on the {@code audit-async-} thread pool and returns
 * immediately on the calling FHIR request thread — the 422 response is
 * already on its way to the vendor before this runs. Failures are logged
 * at ERROR and swallowed; they never affect the FHIR response.
 */
@Async("taskExecutor")
public void storeAsync(RejectedSubmission submission) {
    try {
        // Enforce the payload size cap in actual UTF-8 bytes.
        // FIX: the previous check compared String.length() (UTF-16 code
        // units) against a byte limit, so a payload of multi-byte
        // characters could persist up to ~3x MAX_PAYLOAD_BYTES of TEXT,
        // defeating the documented storage-exhaustion guard.
        String payload = submission.resourcePayload();
        if (payload != null) {
            byte[] payloadBytes = payload.getBytes(java.nio.charset.StandardCharsets.UTF_8);
            if (payloadBytes.length > MAX_PAYLOAD_BYTES) {
                log.warn("Rejected submission payload truncated: originalSize={} " +
                                "clientId={} submissionId={}",
                        payloadBytes.length, submission.clientId(), submission.submissionId());
                // Cutting at a byte boundary may split a multi-byte character;
                // the decoder substitutes U+FFFD at the cut point, which is
                // acceptable for a payload explicitly marked as truncated.
                payload = new String(payloadBytes, 0, MAX_PAYLOAD_BYTES,
                        java.nio.charset.StandardCharsets.UTF_8) +
                        "\n... [TRUNCATED: payload exceeded " + MAX_PAYLOAD_BYTES + " bytes]";
            }
        }
        auditJdbcTemplate.update(
                """
                INSERT INTO audit.fhir_rejected_submissions (
                    submission_id, submission_time, event_id,
                    resource_type, resource_payload,
                    rejection_code, rejection_reason,
                    element_path, violated_profile,
                    invalid_code, invalid_system,
                    sending_facility, client_id
                ) VALUES (
                    ?, ?, ?,
                    ?, ?,
                    ?, ?,
                    ?, ?,
                    ?, ?,
                    ?, ?
                )
                """,
                submission.submissionId(),
                // timestamptz column — JDBC needs java.sql.Timestamp, not Instant
                java.sql.Timestamp.from(submission.submissionTime()),
                submission.eventId(),
                truncate(submission.resourceType(), 40),
                payload,
                submission.rejectionCode(),
                truncate(submission.rejectionReason(), 5000),
                truncate(submission.elementPath(), 500),
                truncate(submission.violatedProfile(), 500),
                truncate(submission.invalidCode(), 200),
                truncate(submission.invalidSystem(), 200),
                truncate(submission.sendingFacility(), 200),
                truncate(submission.clientId(), 200)
        );
        log.debug("Rejected submission stored: submissionId={} rejectionCode={} " +
                        "clientId={} resourceType={}",
                submission.submissionId(), submission.rejectionCode(),
                submission.clientId(), submission.resourceType());
    } catch (Exception e) {
        log.error("REJECTED SUBMISSION STORAGE FAILED — forensic record lost: " +
                        "submissionId={} rejectionCode={} clientId={} error={}",
                submission.submissionId(), submission.rejectionCode(),
                submission.clientId(), e.getMessage(), e);
    }
}
// =========================================================================
// Helper
// =========================================================================
// Null-safe left truncation: returns value unchanged when it fits within
// maxLength, otherwise its first maxLength characters. Null passes through.
private String truncate(String value, int maxLength) {
    if (value == null) {
        return null;
    }
    if (value.length() > maxLength) {
        return value.substring(0, maxLength);
    }
    return value;
}
// =========================================================================
// RejectedSubmission — immutable value object
// =========================================================================
/**
 * Immutable rejected submission record.
 * Built by {@link bd.gov.dghs.fhir.interceptor.AuditEventInterceptor}
 * and passed to {@link #storeAsync}.
 */
public record RejectedSubmission(
    UUID submissionId,
    Instant submissionTime,
    UUID eventId,
    String resourceType,
    String resourcePayload,
    String rejectionCode,
    String rejectionReason,
    String elementPath,
    String violatedProfile,
    String invalidCode,
    String invalidSystem,
    String sendingFacility,
    String clientId
) {
    /** Entry point for fluent construction. */
    public static Builder builder() {
        return new Builder();
    }

    /**
     * Mutable staging area for {@link RejectedSubmission}.
     * {@link #build()} fills in defaults for unset identity fields.
     */
    public static final class Builder {
        private UUID submissionId;
        private Instant submissionTime;
        private UUID eventId;
        private String resourceType;
        private String resourcePayload;
        private String rejectionCode;
        private String rejectionReason;
        private String elementPath;
        private String violatedProfile;
        private String invalidCode;
        private String invalidSystem;
        private String sendingFacility;
        private String clientId;

        public Builder submissionId(UUID value) {
            this.submissionId = value;
            return this;
        }
        public Builder submissionTime(Instant value) {
            this.submissionTime = value;
            return this;
        }
        public Builder eventId(UUID value) {
            this.eventId = value;
            return this;
        }
        public Builder resourceType(String value) {
            this.resourceType = value;
            return this;
        }
        public Builder resourcePayload(String value) {
            this.resourcePayload = value;
            return this;
        }
        public Builder rejectionCode(String value) {
            this.rejectionCode = value;
            return this;
        }
        public Builder rejectionReason(String value) {
            this.rejectionReason = value;
            return this;
        }
        public Builder elementPath(String value) {
            this.elementPath = value;
            return this;
        }
        public Builder violatedProfile(String value) {
            this.violatedProfile = value;
            return this;
        }
        public Builder invalidCode(String value) {
            this.invalidCode = value;
            return this;
        }
        public Builder invalidSystem(String value) {
            this.invalidSystem = value;
            return this;
        }
        public Builder sendingFacility(String value) {
            this.sendingFacility = value;
            return this;
        }
        public Builder clientId(String value) {
            this.clientId = value;
            return this;
        }

        /**
         * Assembles the record. Unset identity fields receive generated
         * defaults (random UUIDs, current time); an unset clientId becomes
         * "unknown" so every stored record carries a client identifier.
         */
        public RejectedSubmission build() {
            return new RejectedSubmission(
                submissionId == null ? UUID.randomUUID() : submissionId,
                submissionTime == null ? Instant.now() : submissionTime,
                eventId == null ? UUID.randomUUID() : eventId,
                resourceType,
                resourcePayload,
                rejectionCode,
                rejectionReason,
                elementPath,
                violatedProfile,
                invalidCode,
                invalidSystem,
                sendingFacility,
                clientId == null ? "unknown" : clientId);
        }
    }
}
}

View File

@@ -0,0 +1,485 @@
package bd.gov.dghs.fhir.config;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import org.flywaydb.core.Flyway;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator;
import org.springframework.boot.autoconfigure.flyway.FlywayMigrationInitializer;
import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.orm.jpa.EntityManagerFactoryBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.DependsOn;
import org.springframework.context.annotation.Primary;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.orm.jpa.JpaTransactionManager;
import org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.annotation.EnableTransactionManagement;
import jakarta.persistence.EntityManagerFactory;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
/**
* Dual datasource configuration for BD FHIR National deployment.
*
* <p>Two completely independent datasources:
* <ul>
* <li>{@code fhirDataSource} — read/write, HAPI JPA store (postgres-fhir via pgBouncer).
* Primary datasource — Spring Boot JPA auto-configuration binds to this.</li>
* <li>{@code auditDataSource} — INSERT-only, audit schema (postgres-audit via pgBouncer).
* Secondary datasource — wired manually, excluded from default health checks.</li>
* </ul>
*
* <p>Two independent Flyway instances:
* <ul>
* <li>FHIR Flyway: runs {@code classpath:db/migration/fhir/V1__hapi_schema.sql}
* against postgres-fhir using superuser credentials (direct, bypasses pgBouncer).</li>
* <li>Audit Flyway: runs {@code classpath:db/migration/audit/V2__audit_schema.sql}
* against postgres-audit using superuser credentials (direct, bypasses pgBouncer).</li>
* </ul>
*
* <p>The FHIR datasource connects via pgBouncer in session mode. Flyway migrations
* connect directly to PostgreSQL (bypassing pgBouncer) to avoid DDL transaction
* visibility issues with pgBouncer. See application.yaml comments for rationale.
*
* <p>The audit datasource health indicator uses
* {@code INSERT INTO audit.health_check ... ON CONFLICT DO NOTHING}
* rather than {@code SELECT 1}, because the audit_writer role has INSERT-only
* privileges and cannot execute SELECT statements.
*/
@Configuration
@EnableTransactionManagement
public class DataSourceConfig {

    private static final Logger log = LoggerFactory.getLogger(DataSourceConfig.class);

    // =========================================================================
    // FHIR DATASOURCE — Primary
    // =========================================================================

    /**
     * FHIR datasource properties bound from {@code spring.datasource.*}.
     * Spring Boot auto-configuration reads these to create the primary datasource.
     */
    @Bean
    @Primary
    @ConfigurationProperties(prefix = "spring.datasource")
    public DataSourceProperties fhirDataSourceProperties() {
        return new DataSourceProperties();
    }

    /**
     * Primary HikariCP datasource for HAPI JPA.
     * Connects to postgres-fhir via pgBouncer (session mode).
     *
     * <p>Pool sizing: {@code maximumPoolSize=5} per replica.
     * At 3 replicas: 15 total PostgreSQL connections, well within
     * pgBouncer {@code pool_size=20}.
     *
     * <p>{@code @Primary} makes this the datasource that Spring Boot's
     * JPA auto-configuration, JdbcTemplate, and Flyway auto-configuration
     * bind to by default.
     */
    @Bean
    @Primary
    @ConfigurationProperties(prefix = "spring.datasource.hikari")
    public DataSource fhirDataSource() {
        // Calling the sibling @Bean method is safe: @Configuration classes are
        // CGLIB-proxied, so this returns the singleton bean, not a new instance.
        DataSource ds = fhirDataSourceProperties()
            .initializeDataSourceBuilder()
            .type(HikariDataSource.class)
            .build();
        log.info("FHIR datasource initialised: url={}",
            fhirDataSourceProperties().getUrl());
        return ds;
    }

    /**
     * JdbcTemplate bound to the FHIR datasource.
     * Used by {@link bd.gov.dghs.fhir.init.IgPackageInitializer}
     * for advisory lock acquisition.
     */
    @Bean
    @Primary
    public JdbcTemplate fhirJdbcTemplate(@Qualifier("fhirDataSource") DataSource fhirDataSource) {
        return new JdbcTemplate(fhirDataSource);
    }

    // =========================================================================
    // AUDIT DATASOURCE — Secondary
    // =========================================================================

    /**
     * Audit datasource properties bound from {@code audit.datasource.*}.
     * Separate prefix — Spring Boot auto-configuration does NOT touch this.
     */
    @Bean
    @ConfigurationProperties(prefix = "audit.datasource")
    public DataSourceProperties auditDataSourceProperties() {
        return new DataSourceProperties();
    }

    /**
     * HikariCP datasource for audit writes.
     * Connects to postgres-audit via pgBouncer (session mode).
     *
     * <p>The audit_writer_login PostgreSQL user has INSERT-only privileges
     * on the audit schema. Any SELECT, UPDATE, DELETE, or TRUNCATE issued
     * against this datasource will fail with a PostgreSQL permission error.
     *
     * <p>Pool sizing: {@code maximumPoolSize=2} — audit writes are async
     * and low-volume. Audit failures do not block FHIR request processing.
     *
     * <p>NOT annotated {@code @Primary} — must be injected by qualifier.
     */
    @Bean("auditDataSource")
    public DataSource auditDataSource() {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl(auditDataSourceProperties().getUrl());
        config.setUsername(auditDataSourceProperties().getUsername());
        config.setPassword(auditDataSourceProperties().getPassword());
        config.setDriverClassName("org.postgresql.Driver");
        // Pool sizing from environment — see application.yaml
        config.setMaximumPoolSize(
            Integer.parseInt(System.getenv().getOrDefault(
                "AUDIT_DATASOURCE_HIKARI_MAXIMUM_POOL_SIZE", "2")));
        config.setMinimumIdle(
            Integer.parseInt(System.getenv().getOrDefault(
                "AUDIT_DATASOURCE_HIKARI_MINIMUM_IDLE", "1")));
        config.setPoolName(System.getenv().getOrDefault(
            "AUDIT_DATASOURCE_HIKARI_POOL_NAME", "audit-pool"));
        config.setConnectionTimeout(5_000L);
        config.setIdleTimeout(300_000L);
        config.setMaxLifetime(900_000L);
        config.setAutoCommit(true);
        // PostgreSQL-specific driver properties
        config.addDataSourceProperty("ApplicationName", "bd-fhir-hapi-audit");
        config.addDataSourceProperty("socketTimeout", "10");
        // Leak detection: 30s — audit connections should never be held long
        config.setLeakDetectionThreshold(30_000L);
        // Connection test: audit_writer cannot SELECT 1, so an idempotent
        // INSERT ... ON CONFLICT DO NOTHING against the seeded health_check
        // row is used instead. NOTE: setting connectionTestQuery makes
        // HikariCP use this query (rather than JDBC4 Connection.isValid())
        // to verify connections returned from the pool after idle time —
        // intentional here.
        config.setConnectionTestQuery(
            "INSERT INTO audit.health_check (check_id) " +
            "VALUES ('00000000-0000-0000-0000-000000000000') " +
            "ON CONFLICT DO NOTHING");
        log.info("Audit datasource initialised: url={}",
            auditDataSourceProperties().getUrl());
        return new HikariDataSource(config);
    }

    /**
     * JdbcTemplate bound to the audit datasource.
     * Used by {@link bd.gov.dghs.fhir.audit.AuditEventEmitter}
     * and {@link bd.gov.dghs.fhir.audit.RejectedSubmissionSink}.
     */
    @Bean("auditJdbcTemplate")
    public JdbcTemplate auditJdbcTemplate(
            @Qualifier("auditDataSource") DataSource auditDataSource) {
        return new JdbcTemplate(auditDataSource);
    }

    // =========================================================================
    // FLYWAY — FHIR schema
    // Explicit Flyway beans supersede Spring Boot's Flyway auto-configuration,
    // pointing at the fhir/ migration subdirectory and connecting directly to
    // PostgreSQL (bypassing pgBouncer).
    // =========================================================================

    /**
     * FHIR Flyway instance (configuration only — migration execution happens
     * in {@link #fhirFlywayInitializer(Flyway)}).
     *
     * <p>Connects directly to postgres-fhir using superuser credentials
     * ({@code SPRING_FLYWAY_URL} / {@code SPRING_FLYWAY_USER} /
     * {@code SPRING_FLYWAY_PASSWORD}) rather than the application user,
     * so that it can CREATE TABLE, CREATE INDEX, and CREATE SEQUENCE.
     *
     * <p>The connection URL points directly to postgres-fhir, not pgBouncer,
     * to avoid DDL transaction visibility issues.
     *
     * <p>IMPORTANT: this bean does NOT call {@code migrate()} itself.
     * {@link FlywayMigrationInitializer} invokes {@code migrate()} during its
     * own initialisation; calling it here as well would run the full migration
     * scan twice on every startup.
     */
    @Bean("fhirFlyway")
    public Flyway fhirFlyway() {
        String url = System.getenv("SPRING_FLYWAY_URL");
        String user = System.getenv("SPRING_FLYWAY_USER");
        String password = System.getenv("SPRING_FLYWAY_PASSWORD");
        if (url == null || user == null || password == null) {
            throw new IllegalStateException(
                "FHIR Flyway configuration missing. " +
                "Required: SPRING_FLYWAY_URL, SPRING_FLYWAY_USER, SPRING_FLYWAY_PASSWORD");
        }
        Flyway flyway = Flyway.configure()
            .dataSource(url, user, password)
            .locations("classpath:db/migration/fhir")
            .table("flyway_schema_history")
            .validateOnMigrate(true)
            .outOfOrder(false)
            .baselineOnMigrate(false)
            .mixed(false)
            .connectRetries(10)        // retry DB connection up to 10 times on startup
            .connectRetriesInterval(5) // 5 seconds between retries
            .load();
        log.info("FHIR Flyway configured: migrations at classpath:db/migration/fhir " +
            "will run via fhirFlywayInitializer");
        return flyway;
    }

    /**
     * FlywayMigrationInitializer runs the FHIR migrations and ensures they
     * complete before the JPA EntityManagerFactory attempts to validate the
     * schema (see the {@code @DependsOn} on {@link #entityManagerFactory}).
     * Without this ordering, Hibernate's {@code ddl-auto: validate} runs
     * against an empty database and fails.
     */
    @Bean
    @DependsOn("fhirFlyway")
    public FlywayMigrationInitializer fhirFlywayInitializer(
            @Qualifier("fhirFlyway") Flyway fhirFlyway) {
        return new FlywayMigrationInitializer(fhirFlyway, null);
    }

    // =========================================================================
    // FLYWAY — Audit schema
    // Completely independent instance — separate database, separate history table.
    // =========================================================================

    /**
     * Audit Flyway instance (configuration only — migration execution happens
     * in {@link #auditFlywayInitializer(Flyway)}).
     *
     * <p>Runs {@code db/migration/audit} scripts against postgres-audit using
     * superuser credentials.
     *
     * <p>Uses {@code flyway_audit_schema_history} as its metadata table name
     * to avoid collision with the FHIR Flyway history table.
     *
     * <p>Like {@link #fhirFlyway()}, this bean does NOT call {@code migrate()}
     * itself — the initializer does, exactly once.
     */
    @Bean("auditFlyway")
    public Flyway auditFlyway() {
        String url = System.getenv("AUDIT_FLYWAY_URL");
        String user = System.getenv("AUDIT_FLYWAY_USER");
        String password = System.getenv("AUDIT_FLYWAY_PASSWORD");
        if (url == null || user == null || password == null) {
            throw new IllegalStateException(
                "Audit Flyway configuration missing. " +
                "Required: AUDIT_FLYWAY_URL, AUDIT_FLYWAY_USER, AUDIT_FLYWAY_PASSWORD");
        }
        Flyway flyway = Flyway.configure()
            .dataSource(url, user, password)
            .locations("classpath:db/migration/audit")
            .table("flyway_audit_schema_history")
            .validateOnMigrate(true)
            .outOfOrder(false)
            .baselineOnMigrate(false)
            .mixed(false)
            .connectRetries(10)
            .connectRetriesInterval(5)
            .load();
        log.info("Audit Flyway configured: migrations at classpath:db/migration/audit " +
            "will run via auditFlywayInitializer");
        return flyway;
    }

    /** Runs the audit migrations once during context initialisation. */
    @Bean
    @DependsOn("auditFlyway")
    public FlywayMigrationInitializer auditFlywayInitializer(
            @Qualifier("auditFlyway") Flyway auditFlyway) {
        return new FlywayMigrationInitializer(auditFlyway, null);
    }

    // =========================================================================
    // HEALTH INDICATORS
    // =========================================================================

    /**
     * Custom health indicator for the audit datasource.
     *
     * <p>Spring Boot's default {@code DataSourceHealthIndicator} executes
     * {@code SELECT 1} against all registered datasources. The audit_writer
     * role cannot execute SELECT — this would cause a spurious DOWN status,
     * triggering load balancer container cycling.
     *
     * <p>This indicator uses {@code INSERT INTO audit.health_check ... ON CONFLICT DO NOTHING},
     * which the audit_writer role is permitted to execute. The INSERT is idempotent
     * and does not grow the health_check table (the single seeded row is reused
     * on every conflict).
     *
     * <p>Registered as {@code auditDb} — matches the readiness probe group
     * in application.yaml ({@code management.endpoint.health.group.readiness.include}).
     */
    @Bean("auditDbHealthIndicator")
    public HealthIndicator auditDbHealthIndicator(
            @Qualifier("auditDataSource") DataSource auditDataSource) {
        return () -> {
            // try-with-resources on both connection and statement: closing the
            // connection would close the statement anyway, but being explicit
            // avoids tripping Hikari leak detection on slow probes.
            try (Connection conn = auditDataSource.getConnection();
                 var stmt = conn.createStatement()) {
                // Probe with an INSERT the audit_writer role is permitted to execute.
                stmt.execute(
                    "INSERT INTO audit.health_check (check_id) " +
                    "VALUES ('00000000-0000-0000-0000-000000000000') " +
                    "ON CONFLICT DO NOTHING");
                return Health.up()
                    .withDetail("database", "postgres-audit")
                    .withDetail("pool", "audit-pool")
                    .build();
            } catch (SQLException e) {
                log.error("Audit datasource health check failed: {}", e.getMessage());
                return Health.down()
                    .withDetail("database", "postgres-audit")
                    .withDetail("error", e.getMessage())
                    .build();
            }
        };
    }

    /**
     * Health indicator for OCL reachability.
     *
     * <p>Registered as {@code ocl} — included in the readiness probe group.
     * OCL unreachability makes the replica not-ready (no traffic) but does
     * not kill the container (liveness remains passing).
     *
     * <p>Uses a lightweight HEAD request to the OCL FHIR metadata endpoint
     * rather than a full $validate-code call. Any status below 500 counts as
     * reachable.
     */
    @Bean("oclHealthIndicator")
    public HealthIndicator oclHealthIndicator() {
        String oclBaseUrl = System.getenv().getOrDefault(
            "HAPI_OCL_BASE_URL", "https://tr.ocl.dghs.gov.bd/api/fhir");
        String metadataUrl = oclBaseUrl + "/metadata";
        // HttpClient is immutable and thread-safe — build it once here rather
        // than allocating a new client (and its executor) on every probe.
        java.net.http.HttpClient client = java.net.http.HttpClient.newBuilder()
            .connectTimeout(java.time.Duration.ofSeconds(5))
            .build();
        return () -> {
            try {
                java.net.URI uri = java.net.URI.create(metadataUrl);
                java.net.http.HttpRequest request = java.net.http.HttpRequest.newBuilder()
                    .uri(uri)
                    .method("HEAD", java.net.http.HttpRequest.BodyPublishers.noBody())
                    .timeout(java.time.Duration.ofSeconds(5))
                    .build();
                java.net.http.HttpResponse<Void> response = client.send(
                    request, java.net.http.HttpResponse.BodyHandlers.discarding());
                if (response.statusCode() < 500) {
                    return Health.up()
                        .withDetail("url", metadataUrl)
                        .withDetail("status", response.statusCode())
                        .build();
                } else {
                    return Health.down()
                        .withDetail("url", metadataUrl)
                        .withDetail("status", response.statusCode())
                        .build();
                }
            } catch (Exception e) {
                log.warn("OCL health check failed: {}", e.getMessage());
                return Health.down()
                    .withDetail("url", metadataUrl)
                    .withDetail("error", e.getMessage())
                    .build();
            }
        };
    }

    // =========================================================================
    // JPA — EntityManagerFactory and TransactionManager
    // Explicitly declared to ensure Spring Boot binds them to fhirDataSource.
    // Without explicit declaration, Spring Boot auto-configuration may attempt
    // to bind to both datasources (because two DataSource beans exist)
    // and fail with "No qualifying bean of type DataSource".
    // =========================================================================

    /**
     * EntityManagerFactory for HAPI JPA, bound explicitly to fhirDataSource.
     *
     * <p>Package scan covers HAPI's internal entity classes. HAPI registers
     * its entities via its own JPA configuration — this factory is the container
     * that hosts them. The {@code persistenceUnit} name "default" is what HAPI
     * expects when it resolves the EntityManagerFactory from the Spring context.
     *
     * <p>{@code @DependsOn("fhirFlywayInitializer")} guarantees migrations
     * have completed before {@code hbm2ddl.auto=validate} inspects the schema.
     */
    @Bean
    @Primary
    @DependsOn({"fhirFlywayInitializer"})
    public LocalContainerEntityManagerFactoryBean entityManagerFactory(
            EntityManagerFactoryBuilder builder,
            @Qualifier("fhirDataSource") DataSource fhirDataSource) {
        Map<String, Object> properties = new HashMap<>();
        properties.put("hibernate.dialect",
            "org.hibernate.dialect.PostgreSQLDialect");
        // Validate only — schema is owned by Flyway, never by Hibernate.
        properties.put("hibernate.hbm2ddl.auto", "validate");
        properties.put("hibernate.jdbc.batch_size", "50");
        properties.put("hibernate.order_inserts", "true");
        properties.put("hibernate.order_updates", "true");
        properties.put("hibernate.jdbc.fetch_size", "100");
        properties.put("hibernate.cache.use_second_level_cache", "false");
        properties.put("hibernate.cache.use_query_cache", "false");
        properties.put("hibernate.generate_statistics", "false");
        properties.put("hibernate.format_sql", "false");
        properties.put("hibernate.show_sql", "false");
        properties.put("hibernate.connection.handling_mode",
            "DELAYED_ACQUISITION_AND_RELEASE_AFTER_TRANSACTION");
        return builder
            .dataSource(fhirDataSource)
            .packages(
                // HAPI entity packages. NOTE: HAPI's Maven groupId is
                // ca.uhn.hapi.fhir but its Java packages are ca.uhn.fhir.* —
                // the previous "ca.uhn.hapi.fhir.jpa.*" entries matched no
                // classes and have been corrected.
                "ca.uhn.fhir.jpa.model.entity",
                "ca.uhn.fhir.jpa.entity"
            )
            .persistenceUnit("default")
            .properties(properties)
            .build();
    }

    /**
     * Transaction manager for HAPI JPA, bound to the FHIR EntityManagerFactory.
     *
     * <p>Audit writes use direct JDBC via {@code auditJdbcTemplate} —
     * they are not transactional in the JPA sense and do not participate
     * in HAPI's JPA transactions.
     */
    @Bean
    @Primary
    public PlatformTransactionManager transactionManager(
            @Qualifier("entityManagerFactory") EntityManagerFactory entityManagerFactory) {
        JpaTransactionManager txManager = new JpaTransactionManager();
        txManager.setEntityManagerFactory(entityManagerFactory);
        return txManager;
    }
}

View File

@@ -0,0 +1,542 @@
package bd.gov.dghs.fhir.config;
import bd.gov.dghs.fhir.init.IgPackageInitializer;
import bd.gov.dghs.fhir.terminology.BdTerminologyValidationSupport;
import ca.uhn.fhir.context.FhirContext;
import ca.uhn.fhir.context.support.DefaultProfileValidationSupport;
import ca.uhn.fhir.context.support.IValidationSupport;
import ca.uhn.fhir.jpa.api.config.JpaStorageSettings;
import ca.uhn.fhir.jpa.api.dao.DaoRegistry;
import ca.uhn.fhir.jpa.packages.NpmPackageValidationSupport;
import ca.uhn.fhir.jpa.starter.AppProperties;
import ca.uhn.fhir.rest.server.RestfulServer;
import ca.uhn.fhir.rest.server.interceptor.RequestValidatingInterceptor;
import ca.uhn.fhir.rest.server.interceptor.ResponseHighlighterInterceptor;
import ca.uhn.fhir.validation.FhirValidator;
import ca.uhn.fhir.validation.IValidatorModule;
import ca.uhn.fhir.validation.ResultSeverityEnum;
import org.hl7.fhir.common.hapi.validation.support.CommonCodeSystemsTerminologyService;
import org.hl7.fhir.common.hapi.validation.support.InMemoryTerminologyServerValidationSupport;
import org.hl7.fhir.common.hapi.validation.support.SnapshotGeneratingValidationSupport;
import org.hl7.fhir.common.hapi.validation.support.ValidationSupportChain;
import org.hl7.fhir.common.hapi.validation.validator.FhirInstanceValidator;
import org.hl7.fhir.r4.model.Meta;
import org.hl7.fhir.r4.model.Resource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.DependsOn;
import jakarta.annotation.PostConstruct;
import java.io.IOException;
import java.io.InputStream;
import java.util.Set;
/**
* HAPI FHIR server configuration for BD National deployment.
*
* <p>Responsibilities:
* <ol>
* <li>Build the validation support chain in correct dependency order</li>
* <li>Load BD Core IG package.tgz from classpath into NpmPackageValidationSupport</li>
* <li>Configure JPA storage settings (no H2, correct dialect)</li>
* <li>Register the RequestValidatingInterceptor that enforces validation on all writes</li>
* <li>Register the unvalidated-profile meta tag interceptor for unknown resource types</li>
* </ol>
*
* <h2>Validation Support Chain (order is critical)</h2>
* <pre>
* 1. DefaultProfileValidationSupport
* — Built-in FHIR R4 base profiles, StructureDefinitions, ValueSets.
* — Must be first: provides the base against which all profiles are validated.
*
* 2. CommonCodeSystemsTerminologyService
* — Validates codes in common code systems (UCUM, MimeType, Language, etc.)
* without calling an external terminology server.
* — Must precede remote support: prevents unnecessary OCL calls for
* non-ICD-11 coded elements.
*
* 3. SnapshotGeneratingValidationSupport
* — Generates snapshot views of differential StructureDefinitions.
* — BD Core IG profiles may be published as differential-only.
* Validation requires the snapshot. This support generates it on demand.
* — Must come after DefaultProfileValidationSupport (needs base profiles
* to generate snapshots from differentials).
*
* 4. InMemoryTerminologyServerValidationSupport
* — In-memory cache for terminology lookups.
* — Serves as the 24-hour cache layer in front of OCL.
* — Must come before BdTerminologyValidationSupport (cache-before-remote).
*
* 5. NpmPackageValidationSupport (BD Core IG)
* — Loads bd.gov.dghs.core-0.2.1.tgz profiles into the validation chain.
* — bd-patient, bd-condition, bd-encounter, bd-observation, bd-practitioner,
* bd-organization, bd-location, bd-medication, bd-medicationrequest,
* bd-immunization.
* — Must come after snapshot support (profiles may need snapshot generation).
*
* 6. BdTerminologyValidationSupport (custom — see terminology/ package)
* — Forces $validate-code against OCL for ICD-11 codes.
* — Suppresses $expand (OCL limitation).
* — Must be last: OCL is the final authority for terminology.
* InMemoryTerminologyServerValidationSupport serves the cache hit path.
* </pre>
*/
@Configuration
@DependsOn({"fhirFlywayInitializer", "auditFlywayInitializer"})
public class FhirServerConfig {
private static final Logger log = LoggerFactory.getLogger(FhirServerConfig.class);
// BD Core IG profile URLs — resources conforming to these get full validation.
// Resources NOT in this set get stored with unvalidated-profile meta tag.
private static final Set<String> BD_CORE_PROFILE_RESOURCE_TYPES = Set.of(
"Patient", "Condition", "Encounter", "Observation",
"Practitioner", "Organization", "Location",
"Medication", "MedicationRequest", "Immunization"
);
// Public base address of this FHIR server, from hapi.fhir.server-address.
@Value("${hapi.fhir.server-address}")
private String serverAddress;
// Classpath location of the BD Core IG package.tgz bundled into the image
// (e.g. "classpath:packages/bd.gov.dghs.core-0.2.1.tgz").
@Value("${bd.fhir.ig.package-classpath}")
private String igPackageClasspath;
// IG version string — included in the package-load log message.
@Value("${bd.fhir.ig.version}")
private String igVersion;
// Coding system of the meta tag applied to resources whose type is not
// covered by BD Core IG profiles.
@Value("${bd.fhir.unvalidated-profile-tag-system}")
private String unvalidatedTagSystem;
// Coding code of that unvalidated-profile meta tag.
@Value("${bd.fhir.unvalidated-profile-tag-code}")
private String unvalidatedTagCode;
// =========================================================================
// FHIR CONTEXT
// =========================================================================
/**
 * Singleton FhirContext for FHIR R4.
 *
 * <p>FhirContext parses every FHIR R4 model class at construction time, so it
 * is expensive to create and must exist exactly once per JVM — never build a
 * per-request or per-validator instance. HAPI's Spring Boot starter would
 * provide this bean automatically; it is declared explicitly here so that our
 * custom classes can inject it before HAPI's auto-configuration runs.
 */
@Bean
public FhirContext fhirContext() {
    final FhirContext context = FhirContext.forR4();
    // Performance: narrative generation is disabled server-side.
    context.setNarrativeGenerator(null);
    log.info("FhirContext R4 initialised");
    return context;
}
// =========================================================================
// JPA STORAGE SETTINGS
// =========================================================================
/**
 * HAPI JPA storage settings.
 *
 * <p>Key decisions:
 * <ul>
 *   <li>No H2: enforced by the PostgreSQL datasource — H2 is never on the classpath.</li>
 *   <li>Allow external references: true — BD Core IG uses canonical URLs.</li>
 *   <li>Referential integrity on write/delete: relaxed for the pilot phase.</li>
 *   <li>Auto-version references: none — vendors must submit correct versions.</li>
 * </ul>
 */
@Bean
public JpaStorageSettings jpaStorageSettings() {
    final JpaStorageSettings storageSettings = new JpaStorageSettings();

    // External canonical URLs are permitted — BD Core IG profiles reference
    // https://fhir.dghs.gov.bd/core/*.
    storageSettings.setAllowExternalReferences(true);

    // Referential integrity relaxed for the pilot phase so vendors can be
    // onboarded incrementally. Flip both to true for national rollout once
    // all resource types are live.
    storageSettings.setEnforceReferentialIntegrityOnWrite(false);
    storageSettings.setEnforceReferentialIntegrityOnDelete(false);

    // Cached search results are reused for 60 seconds.
    storageSettings.setReuseCachedSearchResultsForMillis(60_000L);

    // Cap on results loaded into memory before streaming — 1000 is safe
    // at pilot scale.
    storageSettings.setFetchSizeDefaultMaximum(1000);

    // Default and maximum page sizes — must match application.yaml.
    storageSettings.setDefaultPageSize(20);
    storageSettings.setMaximumPageSize(200);

    // No auto-versioned reference paths — vendors submit explicit versions.
    storageSettings.setAutoVersionReferenceAtPaths();

    log.info("JPA storage settings configured. Unvalidated profile tag: {}|{}",
        unvalidatedTagSystem, unvalidatedTagCode);
    return storageSettings;
}
// =========================================================================
// VALIDATION SUPPORT CHAIN
// =========================================================================
/**
 * BD Core IG NpmPackageValidationSupport.
 *
 * <p>Loads the BD Core IG package.tgz from the classpath into memory.
 * The package.tgz is bundled into the Docker image at build time
 * ({@code src/main/resources/packages/}).
 *
 * <p>Package loading into the server happens during Spring context
 * initialisation via {@link IgPackageInitializer}, which acquires a
 * PostgreSQL advisory lock to prevent race conditions on multi-replica
 * startup. This bean is the validation support wrapper around the
 * classpath package.
 */
@Bean
public NpmPackageValidationSupport npmPackageValidationSupport(FhirContext fhirContext) {
    final NpmPackageValidationSupport igSupport = new NpmPackageValidationSupport(fhirContext);
    // Strip the "classpath:" scheme to obtain a raw resource path.
    final String resourcePath = igPackageClasspath.replace("classpath:", "");
    // Probe the classpath first purely to produce a precise error message
    // when the .tgz was not baked into the image.
    try (InputStream probe = getClass().getClassLoader().getResourceAsStream(resourcePath)) {
        if (probe == null) {
            throw new IllegalStateException(
                "BD Core IG package not found at classpath: " + resourcePath +
                ". Ensure the .tgz file is present in src/main/resources/packages/ " +
                "before building the Docker image.");
        }
        igSupport.loadPackageFromClasspath("classpath:" + resourcePath);
        log.info("BD Core IG package loaded: classpath:{} (version {})",
            resourcePath, igVersion);
    } catch (IOException e) {
        throw new IllegalStateException(
            "Failed to load BD Core IG package from classpath: " + resourcePath, e);
    }
    return igSupport;
}
/**
 * The complete validation support chain.
 *
 * <p>Chain order is documented in the class Javadoc above.
 * Do not reorder without understanding the dependency graph.
 *
 * <p>{@link BdTerminologyValidationSupport} is last because:
 * <ol>
 *   <li>HAPI consults each support in order, stopping at the first
 *       that returns a non-null result.</li>
 *   <li>{@code InMemoryTerminologyServerValidationSupport} serves
 *       cache hits — a code validated within the last 24 hours never
 *       triggers an OCL call.</li>
 *   <li>{@code BdTerminologyValidationSupport} handles cache misses
 *       by calling OCL {@code $validate-code}.</li>
 * </ol>
 */
@Bean
public ValidationSupportChain validationSupportChain(
        FhirContext fhirContext,
        NpmPackageValidationSupport npmPackageValidationSupport,
        BdTerminologyValidationSupport bdTerminologyValidationSupport) {
    // Order is load-bearing — see the class Javadoc before touching it.
    final IValidationSupport[] orderedSupports = {
        // 1. Base FHIR R4 profiles — always first.
        new DefaultProfileValidationSupport(fhirContext),
        // 2. Common code systems (UCUM, MimeType, …) — resolved locally,
        //    before any remote terminology call.
        new CommonCodeSystemsTerminologyService(fhirContext),
        // 3. Snapshot generation for differential-only StructureDefinitions.
        new SnapshotGeneratingValidationSupport(fhirContext),
        // 4. HAPI's built-in in-memory terminology support — serves cached
        //    results so OCL is not contacted for already-validated codes.
        new InMemoryTerminologyServerValidationSupport(fhirContext),
        // 5. BD Core IG profiles (bd-patient, bd-condition, …).
        npmPackageValidationSupport,
        // 6. OCL remote terminology — ICD-11 $validate-code; $expand suppressed.
        bdTerminologyValidationSupport
    };
    final ValidationSupportChain chain = new ValidationSupportChain(orderedSupports);
    log.info("Validation support chain configured with {} supports",
        chain.getValidationSupports().size());
    return chain;
}
/**
* FhirInstanceValidator — the HAPI validator module that runs profile
* validation against a resource using the support chain.
*
* <p>This module is registered with the {@link FhirValidator} and invoked
* by the {@link RequestValidatingInterceptor} on every incoming request.
*/
    @Bean
    public IValidatorModule fhirInstanceValidator(
            ValidationSupportChain validationSupportChain) {
        FhirInstanceValidator validator = new FhirInstanceValidator(validationSupportChain);
        // Error on unknown profile: resources claiming conformance to a profile
        // that is not loaded in the chain fail validation. Prevents vendors
        // from submitting resources with invented profile URLs.
        validator.setErrorForUnknownProfiles(true);
        // NOTE(review): this flag controls warnings for codes that fall outside
        // *extensible* ValueSet bindings (false = such warnings ARE emitted);
        // it does not forbid undeclared extensions as the earlier comment
        // suggested — confirm the intended onboarding behaviour against the
        // FhirInstanceValidator API documentation.
        validator.setNoExtensibleWarnings(false);
        // Do not assume relative references are valid — validate that they
        // point to resource types known to the server.
        validator.setAssumeValidRestReferences(false);
        return validator;
    }
/**
* RequestValidatingInterceptor — enforces validation on ALL incoming writes.
*
* <p>Registered with the HAPI RestfulServer. Intercepts every CREATE,
* UPDATE, and PATCH request. Validates the resource against the support chain
* before the JPA persistence layer is invoked.
*
* <p>On validation failure: returns HTTP 422 Unprocessable Entity with a
* FHIR OperationOutcome containing:
* <ul>
* <li>Issue severity: error</li>
* <li>Issue code: processing or business-rule</li>
* <li>Diagnostics: human-readable description of the violation</li>
* <li>Expression: FHIRPath of the offending element</li>
* </ul>
*
* <p>IMPORTANT: {@code setFailOnSeverity(ResultSeverityEnum.ERROR)} is the
* correct setting. {@code WARNING} severity issues do not cause rejection.
* Only {@code ERROR} and {@code FATAL} cause 422 rejection.
*/
    @Bean
    public RequestValidatingInterceptor requestValidatingInterceptor(
            IValidatorModule fhirInstanceValidator) {
        RequestValidatingInterceptor interceptor = new RequestValidatingInterceptor();
        interceptor.addValidatorModule(fhirInstanceValidator);
        // Reject (HTTP 422) on ERROR and FATAL; WARNING is logged but accepted.
        // National HIE policy: SHALL/MUST violations are rejected, SHOULD
        // violations produce warnings only.
        interceptor.setFailOnSeverity(ResultSeverityEnum.ERROR);
        // NOTE(review): this setting adds validation-outcome *response headers*
        // for issues at INFORMATION severity and above — it does not by itself
        // attach an OperationOutcome body to the response; confirm vendors get
        // the element-level detail they need (see also
        // setAddResponseOutcomeHeaderOnSeverity in the HAPI API).
        interceptor.setAddResponseHeadersOnSeverity(ResultSeverityEnum.INFORMATION);
        // Validate incoming requests only; response validation is disabled for
        // performance (see application.yaml).
        interceptor.setValidateResponses(false);
        log.info("RequestValidatingInterceptor configured: failOnSeverity=ERROR, " +
                "validateRequests=true, validateResponses=false");
        return interceptor;
    }
// =========================================================================
// RESTFUL SERVER CONFIGURATION
// =========================================================================
/**
* RestfulServer post-configuration.
*
* <p>Called after HAPI's Spring Boot auto-configuration has created the
* RestfulServer. Registers our interceptors and configures server metadata.
*
* <p>Interceptor registration order matters:
* <ol>
* <li>{@link bd.gov.dghs.fhir.interceptor.KeycloakJwtInterceptor} — runs first,
* rejects unauthenticated/unauthorised requests before any FHIR processing.</li>
* <li>{@link RequestValidatingInterceptor} — runs after auth, validates resource
* content before persistence.</li>
* <li>{@link bd.gov.dghs.fhir.interceptor.AuditEventInterceptor} — runs after
* persistence, emits audit records for accepted and rejected requests.</li>
* </ol>
*
* <p>Note: interceptors are registered in FhirServerConfig but their beans
* are defined in their respective classes. This method wires them into the server.
* The actual @Bean definitions are in KeycloakJwtInterceptor.java and
* AuditEventInterceptor.java (Steps 7 and 9 respectively).
*/
@Bean
public RestfulServerConfigurer restfulServerConfigurer() {
return server -> {
// Server metadata
server.setServerName("BD FHIR National Repository");
server.setServerVersion(igVersion);
server.setImplementationDescription(
"National FHIR R4 repository for Bangladesh. " +
"Conforms to BD Core FHIR IG v" + igVersion + ". " +
"Published by DGHS/MoHFW Bangladesh.");
// Interceptors registered in correct order.
// Keycloak and Audit interceptors are injected by Spring —
// they are declared as @Bean in their own classes and
// Spring injects them here via method parameter injection.
// The actual parameter injection happens in the overriding
// @Configuration class that extends this configurer.
log.info("RestfulServer configured: serverAddress={}", serverAddress);
};
}
    /**
     * Functional callback for RestfulServer post-configuration.
     *
     * <p>Implemented by the {@code restfulServerConfigurer()} bean above and
     * consumed by the security configuration, which invokes
     * {@link #configure(RestfulServer)} on the auto-configured server after
     * interceptor registration.
     */
    @FunctionalInterface
    public interface RestfulServerConfigurer {
        void configure(RestfulServer server);
    }
// =========================================================================
// UNVALIDATED PROFILE TAG INTERCEPTOR
// =========================================================================
/**
* Adds {@code unvalidated-profile} meta tag to resources of types that
* are not profiled in BD Core IG.
*
* <p>BD Core IG profiles the following resource types:
* Patient, Condition, Encounter, Observation, Practitioner,
* Organization, Location, Medication, MedicationRequest, Immunization.
*
* <p>All other resource types (e.g., Provenance, DocumentReference,
* DiagnosticReport) are stored with a tag indicating they have not been
* validated against a BD Core profile. They are NOT rejected — rejection
* is reserved for resources that claim BD Core profile conformance but fail.
*
* <p>Tag: {@code https://fhir.dghs.gov.bd/tags | unvalidated-profile}
*
* <p>Queryable via: {@code GET /fhir/[type]?_tag=https://fhir.dghs.gov.bd/tags|unvalidated-profile}
*/
@Bean
public ca.uhn.fhir.rest.server.interceptor.ServerOperationInterceptorAdapter
unvalidatedProfileTagInterceptor() {
return new ca.uhn.fhir.rest.server.interceptor.ServerOperationInterceptorAdapter() {
@Override
public void resourceCreated(
ca.uhn.fhir.rest.api.server.RequestDetails theRequest,
org.hl7.fhir.instance.model.api.IBaseResource theResource) {
tagIfUnknownType(theResource);
}
@Override
public void resourceUpdated(
ca.uhn.fhir.rest.api.server.RequestDetails theRequest,
org.hl7.fhir.instance.model.api.IBaseResource theOldResource,
org.hl7.fhir.instance.model.api.IBaseResource theNewResource) {
tagIfUnknownType(theNewResource);
}
private void tagIfUnknownType(
org.hl7.fhir.instance.model.api.IBaseResource resource) {
if (!(resource instanceof Resource r4Resource)) {
return;
}
String resourceType = r4Resource.getResourceType().name();
if (!BD_CORE_PROFILE_RESOURCE_TYPES.contains(resourceType)) {
Meta meta = r4Resource.getMeta();
// Check if tag already present (idempotent)
boolean alreadyTagged = meta.getTag().stream().anyMatch(coding ->
unvalidatedTagSystem.equals(coding.getSystem()) &&
unvalidatedTagCode.equals(coding.getCode()));
if (!alreadyTagged) {
meta.addTag()
.setSystem(unvalidatedTagSystem)
.setCode(unvalidatedTagCode)
.setDisplay("Resource type not profiled in BD Core IG " +
igVersion + " — stored without profile validation");
log.info("Added unvalidated-profile tag to {} resource",
resourceType);
}
}
}
};
}
// =========================================================================
// APP PROPERTIES
// =========================================================================
/**
* HAPI AppProperties — configuration object read by HAPI's Spring Boot
* auto-configuration. We customise specific properties here and let HAPI
* auto-configure the rest from application.yaml hapi.fhir.* properties.
*/
@Bean
public AppProperties appProperties() {
AppProperties props = new AppProperties();
props.setServer_address(serverAddress);
props.setFhir_version(ca.uhn.fhir.context.FhirVersionEnum.R4);
props.setAllow_external_references(true);
props.setAllow_multiple_delete(false);
props.setBulk_export_enabled(true);
props.setNarrative_enabled(false);
return props;
}
/**
* Startup validation: verifies that the IG package exists on the classpath
* before the application accepts any traffic. Fails fast at startup rather
* than failing on the first validation call.
*/
@PostConstruct
public void validateIgPackagePresent() {
String classpathPath = igPackageClasspath.replace("classpath:", "");
try (InputStream is = getClass().getClassLoader().getResourceAsStream(classpathPath)) {
if (is == null) {
throw new IllegalStateException(
"STARTUP FAILURE: BD Core IG package not found at classpath:" +
classpathPath + ". " +
"The Docker image was built without the IG package. " +
"Rebuild the image with the .tgz present in " +
"src/main/resources/packages/.");
}
// Verify non-empty (a zero-byte file would indicate a build error)
int firstByte = is.read();
if (firstByte == -1) {
throw new IllegalStateException(
"STARTUP FAILURE: BD Core IG package at classpath:" +
classpathPath + " is empty (zero bytes). " +
"Rebuild the image with a valid .tgz file.");
}
log.info("IG package presence verified: classpath:{}", classpathPath);
} catch (IOException e) {
throw new IllegalStateException(
"Failed to verify IG package: " + e.getMessage(), e);
}
}
}

View File

@@ -0,0 +1,175 @@
package bd.gov.dghs.fhir.config;
import bd.gov.dghs.fhir.interceptor.AuditEventInterceptor;
import bd.gov.dghs.fhir.interceptor.KeycloakJwtInterceptor;
import ca.uhn.fhir.rest.server.RestfulServer;
import ca.uhn.fhir.rest.server.interceptor.RequestValidatingInterceptor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.web.servlet.FilterRegistrationBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.Ordered;
import jakarta.servlet.Filter;
import jakarta.servlet.FilterChain;
import jakarta.servlet.ServletRequest;
import jakarta.servlet.ServletResponse;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
/**
* Security configuration — wires interceptors into the HAPI RestfulServer
* in the correct order and registers servlet filters for non-FHIR paths.
*
* <h2>Interceptor registration order</h2>
* <p>HAPI processes interceptors in registration order for pre-request hooks
* and in reverse registration order for post-request hooks.
*
* <p>Pre-request order (first registered = first executed):
* <ol>
* <li>{@link KeycloakJwtInterceptor} — auth must run before everything else.
* Unauthenticated requests never reach validation or persistence.</li>
* <li>{@link RequestValidatingInterceptor} — profile + OCL validation.
* Runs after auth (no point validating unauthenticated submissions)
* and before persistence (validates before writing).</li>
* <li>{@link AuditEventInterceptor} — audit runs last pre-persistence.
* Has access to auth context (set by KeycloakJwtInterceptor) and
* validation outcome (set by RequestValidatingInterceptor).</li>
* </ol>
*
* <h2>RestfulServer customiser</h2>
* <p>HAPI 7.x provides the {@code IServerInterceptorCustomizer} mechanism
* for registering interceptors into the auto-configured RestfulServer.
* We implement this via a {@code HapiRestfulServerCustomizer} bean that
* Spring picks up automatically.
*/
@Configuration
public class SecurityConfig {
    private static final Logger log = LoggerFactory.getLogger(SecurityConfig.class);

    /**
     * HAPI RestfulServer customiser — registers all interceptors in the order
     * required by the security design (see class Javadoc):
     * KeycloakJwt → RequestValidating → AuditEvent.
     *
     * @param keycloakJwtInterceptor       authentication; must run first
     * @param requestValidatingInterceptor profile + terminology validation
     * @param auditEventInterceptor        audit; relies on auth and validation context
     * @param restfulServerConfigurer      server metadata callback from FhirServerConfig
     */
    @Bean
    public ca.uhn.fhir.jpa.starter.util.IServerInterceptorCustomizer serverInterceptorCustomizer(
            KeycloakJwtInterceptor keycloakJwtInterceptor,
            RequestValidatingInterceptor requestValidatingInterceptor,
            AuditEventInterceptor auditEventInterceptor,
            FhirServerConfig.RestfulServerConfigurer restfulServerConfigurer) {
        return server -> {
            // Order is critical — see class Javadoc.
            server.registerInterceptor(keycloakJwtInterceptor);
            server.registerInterceptor(requestValidatingInterceptor);
            server.registerInterceptor(auditEventInterceptor);
            // Apply server metadata configuration from FhirServerConfig.
            restfulServerConfigurer.configure(server);
            // ResponseHighlighterInterceptor intentionally NOT registered:
            // browser-oriented HTML rendering is unnecessary overhead in a
            // machine-to-machine API. Enable in the dev profile only.
            // server.registerInterceptor(new ResponseHighlighterInterceptor());
            log.info("HAPI RestfulServer interceptors registered: " +
                    "KeycloakJwt → RequestValidating → AuditEvent");
        };
    }

    /**
     * Servlet filter enforcing HTTPS-only access at the application layer.
     *
     * <p>nginx terminates TLS and sets {@code X-Forwarded-Proto: https} on
     * every proxied request. This filter is defence-in-depth for requests that
     * reach the HAPI JVM directly on port 8080.
     *
     * <p>FIX: the original check only rejected requests carrying an explicit
     * {@code X-Forwarded-Proto: http} header. A request that bypassed nginx
     * entirely carries no such header and was silently allowed through — the
     * opposite of the documented intent ("header absent or http → block").
     * A missing header is now treated the same as plain HTTP.
     *
     * <p>Exception: health check paths are allowed regardless of protocol
     * (load balancer probes originate from the internal network and do not
     * go through nginx).
     */
    @Bean
    public FilterRegistrationBean<Filter> httpsEnforcementFilter() {
        FilterRegistrationBean<Filter> registration = new FilterRegistrationBean<>();
        registration.setFilter((ServletRequest req, ServletResponse res, FilterChain chain) -> {
            HttpServletRequest httpReq = (HttpServletRequest) req;
            HttpServletResponse httpRes = (HttpServletResponse) res;
            String path = httpReq.getRequestURI();
            // Always allow health check paths regardless of protocol.
            if (path.startsWith("/actuator/health")) {
                chain.doFilter(req, res);
                return;
            }
            // nginx sets X-Forwarded-Proto: https on all proxied requests.
            // null  → request did not come through nginx: treat as plain HTTP.
            // http  → explicit plain-HTTP request: redirect to HTTPS.
            String forwardedProto = httpReq.getHeader("X-Forwarded-Proto");
            if (forwardedProto == null || "http".equalsIgnoreCase(forwardedProto)) {
                log.warn("Rejected non-HTTPS request: path={} ip={} proto={}",
                        path, httpReq.getRemoteAddr(), forwardedProto);
                httpRes.setStatus(HttpServletResponse.SC_MOVED_PERMANENTLY);
                httpRes.setHeader("Location",
                        "https://fhir.dghs.gov.bd" + path);
                return;
            }
            chain.doFilter(req, res);
        });
        registration.addUrlPatterns("/*");
        registration.setOrder(Ordered.HIGHEST_PRECEDENCE);
        registration.setName("httpsEnforcementFilter");
        return registration;
    }

    /**
     * Servlet filter that adds security response headers to all responses.
     *
     * <p>Defence-in-depth — nginx also sets these. Duplicate headers are
     * acceptable and keep the protection if nginx configuration changes.
     */
    @Bean
    public FilterRegistrationBean<Filter> securityHeadersFilter() {
        FilterRegistrationBean<Filter> registration = new FilterRegistrationBean<>();
        registration.setFilter((ServletRequest req, ServletResponse res, FilterChain chain) -> {
            HttpServletResponse httpRes = (HttpServletResponse) res;
            // Prevent MIME type sniffing.
            httpRes.setHeader("X-Content-Type-Options", "nosniff");
            // Prevent framing (clickjacking).
            httpRes.setHeader("X-Frame-Options", "DENY");
            // Strict transport security — 1 year, include subdomains.
            httpRes.setHeader("Strict-Transport-Security",
                    "max-age=31536000; includeSubDomains");
            // No referrer information in cross-origin requests.
            httpRes.setHeader("Referrer-Policy", "no-referrer");
            // Disable caching for FHIR responses (they contain patient data).
            httpRes.setHeader("Cache-Control",
                    "no-store, no-cache, must-revalidate, private");
            httpRes.setHeader("Pragma", "no-cache");
            chain.doFilter(req, res);
        });
        registration.addUrlPatterns("/*");
        registration.setOrder(Ordered.HIGHEST_PRECEDENCE + 1);
        registration.setName("securityHeadersFilter");
        return registration;
    }
}

View File

@@ -0,0 +1,317 @@
package bd.gov.dghs.fhir.init;
import ca.uhn.fhir.jpa.packages.NpmPackageValidationSupport;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.time.Duration;
import java.time.Instant;
import javax.sql.DataSource;
/**
* Initialises the BD Core IG package in HAPI's NPM package registry
* using a PostgreSQL advisory lock to prevent race conditions on
* multi-replica startup.
*
* <h2>Problem</h2>
* <p>HAPI writes IG package metadata to {@code NPM_PACKAGE} and
* {@code NPM_PACKAGE_VER} tables on first load. With N replicas starting
* simultaneously, all N JVMs attempt to INSERT the same package record
* concurrently. HAPI uses an upsert internally, but the window between
* SELECT (does this package exist?) and INSERT (write the record) is not
* atomic — concurrent replicas race through this window and produce either
* duplicate key errors (noisy but harmless) or, in rare cases under high
* contention, partial writes that leave the NPM_PACKAGE table in an
* inconsistent state.
*
* <h2>Solution</h2>
* <p>Before loading the IG, each replica attempts to acquire a PostgreSQL
* session-level advisory lock with a deterministic lock ID derived from
* the IG package name. Only one replica acquires the lock at a time.
* The first replica loads the package and writes the metadata. Subsequent
* replicas acquire the lock after the first releases it, find the package
* already registered, and skip the insert — producing a clean INFO log
* rather than an ERROR.
*
 * <h2>Advisory lock key</h2>
 * <p>PostgreSQL advisory lock keys are 64-bit integers. We derive the key by
 * computing a djb2 hash of the BD Core IG package ID string and combining it
 * with a fixed namespace constant (see {@code deriveLockKey}). djb2 is a
 * fully specified algorithm, so the key is deterministic across all replicas
 * regardless of JVM version — unlike {@code String.hashCode()}, whose
 * specification does not guarantee cross-version stability — and unique
 * enough for this single-package use case.
*
* <h2>Lock release</h2>
* <p>The advisory lock is released explicitly after IG load completes.
* If the JVM crashes mid-load, PostgreSQL releases session-level advisory
* locks automatically when the connection closes — no manual cleanup required.
*
* <h2>Implements InitializingBean</h2>
* <p>{@link InitializingBean#afterPropertiesSet()} is called by Spring after
* all {@code @Value} fields are injected and all dependencies are available,
* but before the application context is marked as ready to serve requests.
* This is the correct lifecycle hook for startup initialisation that must
* complete before the server accepts traffic.
*/
@Component
public class IgPackageInitializer implements InitializingBean {
private static final Logger log = LoggerFactory.getLogger(IgPackageInitializer.class);
/**
* Advisory lock key namespace — all BD FHIR IG locks use this prefix
* to avoid collision with any other advisory locks in the database.
* Value chosen arbitrarily — just needs to be a consistent namespace constant.
*/
private static final long ADVISORY_LOCK_NAMESPACE = 0xBD_FHIR_00L;
/**
* Maximum time to wait for the advisory lock before giving up.
* If lock acquisition exceeds this duration, the replica has likely
* encountered a deadlock or the lock-holding replica has crashed mid-load.
* Fail startup rather than waiting indefinitely.
*/
private static final Duration LOCK_TIMEOUT = Duration.ofSeconds(120);
private final NpmPackageValidationSupport npmPackageValidationSupport;
private final JdbcTemplate fhirJdbcTemplate;
@Value("${bd.fhir.ig.package-classpath}")
private String igPackageClasspath;
@Value("${bd.fhir.ig.version}")
private String igVersion;
public IgPackageInitializer(
NpmPackageValidationSupport npmPackageValidationSupport,
JdbcTemplate fhirJdbcTemplate) {
this.npmPackageValidationSupport = npmPackageValidationSupport;
this.fhirJdbcTemplate = fhirJdbcTemplate;
}
/**
* Called by Spring after all properties are injected.
* Acquires advisory lock, loads IG, releases lock.
*/
@Override
@Transactional(propagation = Propagation.NOT_SUPPORTED)
// NOT_SUPPORTED: advisory lock must be on a direct JDBC connection,
// not inside a Spring-managed transaction. Advisory locks acquired
// inside a transaction are released when the transaction commits —
// we need the lock held until after the IG load completes, which
// spans multiple internal HAPI transactions.
public void afterPropertiesSet() throws Exception {
String packageId = derivePackageId(igPackageClasspath);
long lockKey = deriveLockKey(packageId);
log.info("IG package initialisation starting: packageId={} version={} lockKey={}",
packageId, igVersion, lockKey);
Instant lockStart = Instant.now();
// Attempt to acquire advisory lock with timeout.
// pg_try_advisory_lock returns true immediately if lock acquired,
// false if already held by another session.
// We poll with backoff rather than using pg_advisory_lock (which blocks
// indefinitely) to respect LOCK_TIMEOUT.
boolean lockAcquired = false;
long backoffMs = 250;
while (Duration.between(lockStart, Instant.now()).compareTo(LOCK_TIMEOUT) < 0) {
Boolean acquired = fhirJdbcTemplate.queryForObject(
"SELECT pg_try_advisory_lock(?)",
Boolean.class,
lockKey);
if (Boolean.TRUE.equals(acquired)) {
lockAcquired = true;
log.info("Advisory lock acquired: lockKey={} waitedMs={}",
lockKey,
Duration.between(lockStart, Instant.now()).toMillis());
break;
}
log.debug("Advisory lock contended — waiting {}ms: lockKey={}", backoffMs, lockKey);
Thread.sleep(backoffMs);
backoffMs = Math.min(backoffMs * 2, 5000); // exponential backoff, cap at 5s
}
if (!lockAcquired) {
throw new IllegalStateException(
"Failed to acquire IG package advisory lock within " +
LOCK_TIMEOUT.getSeconds() + " seconds. " +
"lockKey=" + lockKey + ". " +
"This may indicate a crashed replica holding the lock — " +
"check PostgreSQL pg_locks for session holding key " + lockKey + ".");
}
try {
performIgLoad(packageId);
} finally {
// Always release the lock, even if IG load fails.
// Other replicas are blocked waiting for this lock —
// they will see the partial state and attempt their own load.
try {
fhirJdbcTemplate.queryForObject(
"SELECT pg_advisory_unlock(?)",
Boolean.class,
lockKey);
log.info("Advisory lock released: lockKey={}", lockKey);
} catch (Exception e) {
log.error("Failed to release advisory lock: lockKey={} error={}",
lockKey, e.getMessage());
// Non-fatal: PostgreSQL releases session locks on connection close.
// The connection will be returned to HikariCP and eventually
// closed/recycled, releasing the lock automatically.
}
}
}
// =========================================================================
// IG load logic
// =========================================================================
/**
* Performs the actual IG package registration.
*
* <p>Checks whether the package is already registered before attempting
* to load. This handles the case where replicas 2..N acquire the lock
* after replica 1 has already loaded and released it.
*/
private void performIgLoad(String packageId) {
// Check if already registered by a previous replica or a previous run
boolean alreadyLoaded = isPackageAlreadyRegistered(packageId, igVersion);
if (alreadyLoaded) {
log.info("IG package already registered in NPM_PACKAGE — " +
"skipping load (this replica is not first): packageId={} version={}",
packageId, igVersion);
// NpmPackageValidationSupport was already loaded from classpath
// in FhirServerConfig.npmPackageValidationSupport() bean.
// The in-memory validation support is ready regardless of DB state.
return;
}
// First replica: load the package
log.info("Loading BD Core IG package: packageId={} version={} classpath={}",
packageId, igVersion, igPackageClasspath);
Instant loadStart = Instant.now();
try {
// NpmPackageValidationSupport.loadPackageFromClasspath() does two things:
// 1. Loads StructureDefinitions, ValueSets, CodeSystems into in-memory cache
// 2. Writes NPM_PACKAGE and NPM_PACKAGE_VER records to the database
// The in-memory load already happened in FhirServerConfig (bean initialisation).
// Here we ensure the database records are written exactly once.
String classpathPath = igPackageClasspath.replace("classpath:", "");
npmPackageValidationSupport.loadPackageFromClasspath(
"classpath:" + classpathPath);
long loadMs = Duration.between(loadStart, Instant.now()).toMillis();
log.info("BD Core IG package loaded successfully: packageId={} version={} " +
"durationMs={}", packageId, igVersion, loadMs);
} catch (Exception e) {
// If loading fails, log the error but do not crash startup.
// The in-memory validation support loaded in FhirServerConfig
// is the primary validation mechanism. DB registration failure
// is an operational issue but not a functional blocker for validation.
log.error("IG package DB registration failed (in-memory validation " +
"still active): packageId={} version={} error={}",
packageId, igVersion, e.getMessage(), e);
}
}
/**
* Checks whether the IG package is already registered in the NPM_PACKAGE table.
*
* <p>Queries {@code NPM_PACKAGE} joined to {@code NPM_PACKAGE_VER} to check
* both package existence and version match. A package registered at a different
* version (e.g., 0.1.0 when deploying 0.2.1) is treated as not registered —
* the new version will be added alongside the old one.
*/
private boolean isPackageAlreadyRegistered(String packageId, String version) {
try {
Integer count = fhirJdbcTemplate.queryForObject(
"SELECT COUNT(*) FROM NPM_PACKAGE np " +
"JOIN NPM_PACKAGE_VER npv ON npv.PKG_PID = np.PID " +
"WHERE np.PACKAGE_ID = ? AND npv.VERSION_ID = ?",
Integer.class,
packageId, version);
return count != null && count > 0;
} catch (Exception e) {
// Query failure (e.g., table not yet created) — treat as not registered.
// Flyway should have run V1 before this is called, but defensive check.
log.warn("Could not query NPM_PACKAGE table (Flyway may not have run yet): {}",
e.getMessage());
return false;
}
}
// =========================================================================
// Helpers
// =========================================================================
/**
* Derives the NPM package ID from the classpath path.
*
* <p>The package ID is the FHIR NPM package identifier embedded in the
* .tgz filename before the version suffix.
* Example: {@code packages/bd.gov.dghs.core-0.2.1.tgz} → {@code bd.gov.dghs.core}
*/
private String derivePackageId(String classpathPath) {
String filename = classpathPath;
// Strip classpath: prefix and directory
int lastSlash = filename.lastIndexOf('/');
if (lastSlash >= 0) {
filename = filename.substring(lastSlash + 1);
}
// Strip .tgz extension
if (filename.endsWith(".tgz")) {
filename = filename.substring(0, filename.length() - 4);
}
// Strip version suffix (last hyphen-separated segment that starts with digit)
int lastHyphen = filename.lastIndexOf('-');
if (lastHyphen > 0) {
String versionPart = filename.substring(lastHyphen + 1);
if (!versionPart.isEmpty() && Character.isDigit(versionPart.charAt(0))) {
filename = filename.substring(0, lastHyphen);
}
}
return filename; // e.g., "bd.gov.dghs.core"
}
/**
* Derives a stable 64-bit advisory lock key from a package ID string.
*
* <p>Combines the namespace constant with the package ID hash to produce
* a key that is:
* <ul>
* <li>Deterministic: same packageId always produces the same key</li>
* <li>Namespaced: BD FHIR locks are distinguishable from other advisory locks</li>
* <li>Positive: PostgreSQL advisory lock keys must be valid long values</li>
* </ul>
*/
private long deriveLockKey(String packageId) {
// Use a djb2-style hash for stability across JVM invocations.
// Java String.hashCode() is stable within a JVM version but the spec
// does not guarantee cross-version stability. djb2 is fully specified.
long hash = 5381L;
for (char c : packageId.toCharArray()) {
hash = ((hash << 5) + hash) + c;
}
// XOR with namespace to distinguish from unrelated advisory locks
return ADVISORY_LOCK_NAMESPACE ^ Math.abs(hash);
}
}

View File

@@ -0,0 +1,601 @@
package bd.gov.dghs.fhir.interceptor;
import bd.gov.dghs.fhir.audit.AuditEventEmitter;
import bd.gov.dghs.fhir.audit.RejectedSubmissionSink;
import bd.gov.dghs.fhir.validator.ClusterExpressionValidator;
import ca.uhn.fhir.interceptor.api.Hook;
import ca.uhn.fhir.interceptor.api.Interceptor;
import ca.uhn.fhir.interceptor.api.Pointcut;
import ca.uhn.fhir.rest.api.RestOperationTypeEnum;
import ca.uhn.fhir.rest.api.server.RequestDetails;
import ca.uhn.fhir.rest.server.exceptions.BaseServerResponseException;
import ca.uhn.fhir.rest.server.exceptions.UnprocessableEntityException;
import org.hl7.fhir.instance.model.api.IBaseResource;
import org.hl7.fhir.r4.model.OperationOutcome;
import org.hl7.fhir.r4.model.Resource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import java.time.Instant;
import java.util.List;
import java.util.UUID;
/**
* HAPI FHIR interceptor that records every resource submission outcome
* to the immutable audit store.
*
* <h2>Interceptor responsibilities</h2>
* <ol>
* <li><b>Cluster expression pre-validation</b> — invokes
* {@link ClusterExpressionValidator} before HAPI's profile validation
* runs. Cluster expression failures are converted to 422 responses
* and recorded as REJECTED in the audit trail.</li>
* <li><b>Accepted resource audit</b> — after successful storage, emits
* an {@code AuditEvent} to {@link AuditEventEmitter} (async).</li>
* <li><b>Rejected resource audit</b> — on any 422 or 401 exception,
* captures the full resource payload and rejection details to
* {@link RejectedSubmissionSink} (async) and emits an AuditEvent.</li>
* </ol>
*
* <h2>Hooks and ordering</h2>
* <pre>
* SERVER_INCOMING_REQUEST_PRE_HANDLED ← KeycloakJwtInterceptor (Step 8)
* SERVER_INCOMING_REQUEST_PRE_HANDLED ← This class: cluster expression validation
* (registered after Keycloak, runs second)
* [HAPI profile validation runs here — RequestValidatingInterceptor]
* [HAPI persistence runs here]
* STORAGE_PRESTORAGE_RESOURCE_CREATED ← This class: unvalidated-profile tag check
* SERVER_OUTGOING_RESPONSE ← This class: accepted resource audit
* SERVER_PROCESSING_COMPLETED ← This class: exception path audit
* </pre>
*
* <h2>Async audit writes</h2>
* <p>All audit writes ({@link AuditEventEmitter} and {@link RejectedSubmissionSink})
* are executed on Spring's async task executor (configured in application.yaml
* {@code spring.task.execution}). The FHIR request thread returns the HTTP response
* to the vendor immediately — audit writes do not add to response latency.
*
* <p>Consequence: in the rare case of audit write failure (postgres-audit unavailable),
* the FHIR operation succeeds but the audit record is missing. The audit write
* failure is logged at ERROR level. DGHS must treat audit write failures as
* high-priority incidents — a gap in the audit trail violates the immutability
* requirement. If this is unacceptable, change audit writes to synchronous and
* accept the latency cost.
*
* <h2>Request attribute contract</h2>
* <p>This class reads request attributes set by {@link KeycloakJwtInterceptor}:
* <ul>
* <li>{@code BD_FHIR_CLIENT_ID} — Keycloak client_id</li>
* <li>{@code BD_FHIR_FACILITY} — sending facility identifier</li>
* <li>{@code BD_FHIR_SUBJECT} — JWT sub claim</li>
* <li>{@code BD_FHIR_REQUEST_ID} — per-request UUID</li>
* <li>{@code BD_FHIR_AUTH_EXEMPTED} — true for unauthenticated exempted paths</li>
* </ul>
* If KeycloakJwtInterceptor has not set these (e.g., on exempted paths),
* fallback values are used — exempted requests are not audit-logged for
* resource operations.
*/
@Interceptor
@Component
public class AuditEventInterceptor {
private static final Logger log = LoggerFactory.getLogger(AuditEventInterceptor.class);
// Validates BD cluster-expression rules on inbound write payloads (Hook 1).
private final ClusterExpressionValidator clusterExpressionValidator;
// Async writer for AuditEvent records (see class javadoc: async audit writes).
private final AuditEventEmitter auditEventEmitter;
// Async store for full rejected-resource payloads (forensic audit).
private final RejectedSubmissionSink rejectedSubmissionSink;
/** Constructor injection of the three collaborators (Spring singletons). */
public AuditEventInterceptor(
ClusterExpressionValidator clusterExpressionValidator,
AuditEventEmitter auditEventEmitter,
RejectedSubmissionSink rejectedSubmissionSink) {
this.clusterExpressionValidator = clusterExpressionValidator;
this.auditEventEmitter = auditEventEmitter;
this.rejectedSubmissionSink = rejectedSubmissionSink;
}
// =========================================================================
// Hook 1: Cluster expression validation — pre-storage, pre-profile-validation
// =========================================================================
/**
 * Runs cluster expression validation ahead of HAPI's own profile
 * validation and ahead of any database write.
 *
 * <p>Applies to write operations only (per {@code isWriteOperation});
 * reads carry no resource body and pass straight through, as do
 * auth-exempted paths (metadata endpoint, health probes).
 *
 * <p>A cluster-expression failure surfaces as
 * {@link UnprocessableEntityException} carrying a FHIR OperationOutcome;
 * HAPI turns it into HTTP 422. The offending payload is stored for
 * forensics before the exception is re-thrown.
 */
@Hook(Pointcut.SERVER_INCOMING_REQUEST_PRE_HANDLED)
public void validateClusterExpressions(
        RequestDetails requestDetails,
        HttpServletRequest servletRequest,
        HttpServletResponse servletResponse) {
    if (!isWriteOperation(requestDetails)) {
        return; // reads and non-resource operations: nothing to validate
    }
    Object exempted = servletRequest.getAttribute(
            KeycloakJwtInterceptor.REQUEST_ATTR_AUTH_EXEMPTED);
    if (Boolean.TRUE.equals(exempted)) {
        return; // metadata endpoint and other unauthenticated paths
    }
    IBaseResource body = requestDetails.getResource();
    if (!(body instanceof Resource r4)) {
        return; // no body, or not an R4 resource — defer to HAPI
    }
    try {
        clusterExpressionValidator.validateResource(r4, requestDetails);
    } catch (UnprocessableEntityException rejection) {
        // Persist the payload for forensic audit before HAPI converts
        // this into a 422; the exception-path audit fires separately.
        storeClusterRejection(servletRequest, r4, rejection);
        throw rejection;
    }
}
// =========================================================================
// Hook 2: Accepted resource audit — STORAGE_PRESTORAGE_* pointcuts
// =========================================================================
/**
 * Storage hook for creates. Emits an ACCEPTED audit event asynchronously.
 *
 * <p>NOTE(review): {@code STORAGE_PRESTORAGE_RESOURCE_CREATED} fires
 * *before* the resource is persisted (inside the storage pipeline), not
 * after — the previous "post-storage / fires after" wording was
 * inaccurate. If the audit must record only committed writes, a
 * post-commit pointcut would be needed; confirm intended semantics.
 */
@Hook(Pointcut.STORAGE_PRESTORAGE_RESOURCE_CREATED)
public void auditResourceCreated(
RequestDetails requestDetails,
IBaseResource resource) {
auditAcceptedOperation(requestDetails, resource, "CREATE");
}
/**
 * Storage hook for updates — fires pre-storage (see note on
 * {@link #auditResourceCreated}); audits the incoming (new) version.
 */
@Hook(Pointcut.STORAGE_PRESTORAGE_RESOURCE_UPDATED)
public void auditResourceUpdated(
RequestDetails requestDetails,
IBaseResource oldResource,
IBaseResource newResource) {
auditAcceptedOperation(requestDetails, newResource, "UPDATE");
}
/**
 * Storage hook for deletes — fires pre-storage (see note on
 * {@link #auditResourceCreated}).
 */
@Hook(Pointcut.STORAGE_PRESTORAGE_RESOURCE_DELETED)
public void auditResourceDeleted(
RequestDetails requestDetails,
IBaseResource resource) {
auditAcceptedOperation(requestDetails, resource, "DELETE");
}
// =========================================================================
// Hook 3: Exception path — rejected resource audit
// =========================================================================
/**
 * Exception hook — fires when any HAPI processing exception occurs.
 *
 * <p>Handles all rejection paths:
 * <ul>
 *   <li>422 from profile validation failure</li>
 *   <li>422 from OCL terminology rejection</li>
 *   <li>422 from cluster expression rejection (thrown in Hook 1)</li>
 *   <li>401 from auth failure (KeycloakJwtInterceptor)</li>
 * </ul>
 *
 * <p>For 422 responses: stores the full resource payload in
 * {@code audit.fhir_rejected_submissions} with rejection details.
 * For 401 responses: records auth failure event only (no resource payload).
 *
 * @return {@code true} so HAPI continues its normal exception handling and
 *         writes the OperationOutcome response itself. Per HAPI's
 *         SERVER_HANDLE_EXCEPTION contract, returning {@code false} means
 *         "this interceptor has already written the response" and aborts
 *         default response generation — the previous implementation
 *         returned {@code false} without ever writing a response, which
 *         would have left clients with no OperationOutcome body.
 */
@Hook(Pointcut.SERVER_HANDLE_EXCEPTION)
public boolean handleProcessingException(
        RequestDetails requestDetails,
        HttpServletRequest servletRequest,
        HttpServletResponse servletResponse,
        BaseServerResponseException exception) {
    int statusCode = exception.getStatusCode();
    // Only audit 401 and 422 — other errors (404, 500) are operational,
    // not submission rejections.
    if (statusCode == 422) {
        auditRejectedSubmission(requestDetails, servletRequest, exception,
                classifyRejectionCode(exception));
    } else if (statusCode == 401) {
        auditAuthFailure(requestDetails, servletRequest, exception);
    }
    // true = let HAPI generate the HTTP error response as usual.
    return true;
}
// =========================================================================
// Audit emission helpers
// =========================================================================
/**
 * Emits an ACCEPTED audit record for a successful write operation.
 * Skipped entirely for auth-exempted requests. The emit is asynchronous
 * and does not add to response latency.
 */
private void auditAcceptedOperation(
        RequestDetails requestDetails,
        IBaseResource resource,
        String operation) {
    if (isExemptedRequest(requestDetails)) {
        return;
    }
    AuditContext ctx = extractAuditContext(requestDetails);
    String resourceType;
    if (resource != null) {
        resourceType = resource.fhirType();
    } else {
        resourceType = requestDetails.getResourceName();
    }
    String resourceId = null;
    if (resource instanceof Resource r4) {
        resourceId = r4.getIdElement().getIdPart();
    }
    log.info("Resource {} accepted: resourceType={} resourceId={} " +
            "clientId={} facility={} requestId={}",
            operation, resourceType, resourceId,
            ctx.clientId(), ctx.facility(), ctx.requestId());
    AuditEventEmitter.AuditRecord record = AuditEventEmitter.AuditRecord.builder()
            .eventId(UUID.randomUUID())
            .eventTime(Instant.now())
            .eventType("OPERATION")
            .operation(operation)
            .resourceType(resourceType)
            .resourceId(resourceId)
            .outcome("ACCEPTED")
            .clientId(ctx.clientId())
            .subject(ctx.subject())
            .sendingFacility(ctx.facility())
            .requestIp(ctx.requestIp())
            .requestId(ctx.requestId())
            .build();
    // Async audit write — does not block the response.
    auditEventEmitter.emitAsync(record);
}
/**
 * Records a rejected (HTTP 422) submission: structured log line, optional
 * forensic payload store, and an async VALIDATION_FAILURE audit event.
 *
 * <p>Fix: the rejected-submission row and the audit event now share one
 * {@code eventId} so the stored payload can be joined back to its audit
 * record — previously two unrelated UUIDs were generated, breaking the
 * correlation the sink's {@code eventId} field implies.
 */
private void auditRejectedSubmission(
        RequestDetails requestDetails,
        HttpServletRequest servletRequest,
        BaseServerResponseException exception,
        String rejectionCode) {
    AuditContext ctx = extractAuditContext(requestDetails, servletRequest);
    String resourceType = requestDetails.getResourceName();
    String operation = deriveOperation(requestDetails);
    // One event ID links the audit event and the stored payload.
    UUID eventId = UUID.randomUUID();
    // Extract OperationOutcome messages for structured logging.
    List<String> issueMessages = extractIssueMessages(exception);
    String primaryMessage = issueMessages.isEmpty() ?
            exception.getMessage() : issueMessages.get(0);
    log.info("Resource submission rejected: resourceType={} rejectionCode={} " +
            "clientId={} facility={} requestId={} reason={}",
            resourceType, rejectionCode, ctx.clientId(),
            ctx.facility(), ctx.requestId(), primaryMessage);
    // Store rejected payload if this was a write operation with a resource body.
    IBaseResource resource = requestDetails.getResource();
    if (resource != null && isWriteOperation(requestDetails)) {
        rejectedSubmissionSink.storeAsync(
                RejectedSubmissionSink.RejectedSubmission.builder()
                        .submissionId(UUID.randomUUID())
                        .submissionTime(Instant.now())
                        .eventId(eventId)
                        .resourceType(resourceType)
                        .resourcePayload(serializeResource(requestDetails, resource))
                        .rejectionCode(rejectionCode)
                        .rejectionReason(primaryMessage)
                        .elementPath(extractFirstElementPath(exception))
                        .violatedProfile(extractViolatedProfile(exception))
                        .invalidCode(extractInvalidCode(exception))
                        .invalidSystem(extractInvalidSystem(exception))
                        .sendingFacility(ctx.facility())
                        .clientId(ctx.clientId())
                        .build());
    }
    // Emit the audit event (async).
    auditEventEmitter.emitAsync(AuditEventEmitter.AuditRecord.builder()
            .eventId(eventId)
            .eventTime(Instant.now())
            .eventType("VALIDATION_FAILURE")
            .operation(operation)
            .resourceType(resourceType)
            .outcome("REJECTED")
            .outcomeDetail(primaryMessage)
            .clientId(ctx.clientId())
            .subject(ctx.subject())
            .sendingFacility(ctx.facility())
            .requestIp(ctx.requestIp())
            .requestId(ctx.requestId())
            .validationMessages(issueMessages)
            .build());
}
/**
 * Records an authentication failure (HTTP 401) as an async AUTH_FAILURE
 * audit event. No resource payload is stored for auth failures.
 */
private void auditAuthFailure(
        RequestDetails requestDetails,
        HttpServletRequest servletRequest,
        BaseServerResponseException exception) {
    AuditContext ctx = extractAuditContext(requestDetails, servletRequest);
    String reason = exception.getMessage();
    log.info("Auth failure: clientId={} requestId={} ip={} reason={}",
            ctx.clientId(), ctx.requestId(), ctx.requestIp(), reason);
    // Fallback labels for requests that never passed the auth interceptor.
    String clientId = ctx.clientId() != null ? ctx.clientId() : "unauthenticated";
    String subject = ctx.subject() != null ? ctx.subject() : "unknown";
    auditEventEmitter.emitAsync(AuditEventEmitter.AuditRecord.builder()
            .eventId(UUID.randomUUID())
            .eventTime(Instant.now())
            .eventType("AUTH_FAILURE")
            .operation(deriveOperation(requestDetails))
            .resourceType(requestDetails.getResourceName())
            .outcome("REJECTED")
            .outcomeDetail(reason)
            .clientId(clientId)
            .subject(subject)
            .sendingFacility(ctx.facility())
            .requestIp(ctx.requestIp())
            .requestId(ctx.requestId())
            .build());
}
/**
 * Asynchronously stores the payload of a resource that failed
 * cluster-expression validation (Hook 1) in the rejected-submissions sink.
 */
private void storeClusterRejection(
        HttpServletRequest servletRequest,
        Resource r4Resource,
        UnprocessableEntityException e) {
    AuditContext ctx = extractAuditContextFromServlet(servletRequest);
    RejectedSubmissionSink.RejectedSubmission submission =
            RejectedSubmissionSink.RejectedSubmission.builder()
                    .submissionId(UUID.randomUUID())
                    .submissionTime(Instant.now())
                    .eventId(UUID.randomUUID())
                    .resourceType(r4Resource.getResourceType().name())
                    .resourcePayload(serializeR4Resource(r4Resource))
                    .rejectionCode("CLUSTER_EXPRESSION_INVALID")
                    .rejectionReason(extractFirstMessage(e))
                    .elementPath(extractFirstElementPath(e))
                    .sendingFacility(ctx.facility())
                    .clientId(ctx.clientId())
                    .build();
    rejectedSubmissionSink.storeAsync(submission);
}
// =========================================================================
// Context extraction helpers
// =========================================================================
/** Convenience overload — pulls the servlet request out of RequestDetails. */
private AuditContext extractAuditContext(RequestDetails requestDetails) {
    return extractAuditContext(requestDetails,
            (HttpServletRequest) requestDetails.getServletRequest());
}
/** Null-safe entry point: all-"unknown" context when no servlet request exists. */
private AuditContext extractAuditContext(
        RequestDetails requestDetails, HttpServletRequest servletRequest) {
    return servletRequest == null
            ? new AuditContext("unknown", "unknown", "unknown", "unknown", "unknown")
            : extractAuditContextFromServlet(servletRequest);
}
/** Reads the identity attributes set by KeycloakJwtInterceptor. */
private AuditContext extractAuditContextFromServlet(HttpServletRequest req) {
    return new AuditContext(
            attrString(req, KeycloakJwtInterceptor.REQUEST_ATTR_CLIENT_ID, "unknown"),
            attrString(req, KeycloakJwtInterceptor.REQUEST_ATTR_FACILITY, "unknown"),
            attrString(req, KeycloakJwtInterceptor.REQUEST_ATTR_SUBJECT, "unknown"),
            attrString(req, KeycloakJwtInterceptor.REQUEST_ATTR_REQUEST_ID, "unknown"),
            extractIp(req));
}
/** Request attribute as String, or the fallback when absent. */
private String attrString(HttpServletRequest req, String attr, String fallback) {
    Object value = req.getAttribute(attr);
    return value == null ? fallback : value.toString();
}
/**
 * Client IP: first entry of X-Forwarded-For when present (original client,
 * per proxy convention), otherwise the socket peer address.
 * NOTE(review): XFF is caller-supplied — trust it only behind a proxy that
 * overwrites the header; confirm the deployment topology.
 */
private String extractIp(HttpServletRequest req) {
    String forwarded = req.getHeader("X-Forwarded-For");
    if (forwarded == null || forwarded.isBlank()) {
        return req.getRemoteAddr();
    }
    int comma = forwarded.indexOf(',');
    return comma > 0 ? forwarded.substring(0, comma).trim() : forwarded.trim();
}
// =========================================================================
// Rejection classification
// =========================================================================
/**
 * Maps a 422 exception onto a coarse rejection code by scanning the
 * OperationOutcome issue diagnostics (heuristic, case-insensitive).
 * Returns PROFILE_VIOLATION when nothing more specific matches.
 */
private String classifyRejectionCode(BaseServerResponseException exception) {
    if (!(exception instanceof UnprocessableEntityException uee)) {
        return "PROFILE_VIOLATION";
    }
    OperationOutcome outcome = extractOperationOutcome(uee);
    if (outcome == null) {
        return "PROFILE_VIOLATION";
    }
    for (var issue : outcome.getIssue()) {
        String diagnostics = issue.getDiagnostics();
        if (diagnostics == null) {
            continue;
        }
        String lower = diagnostics.toLowerCase();
        boolean mentionsCluster = lower.contains("cluster");
        if (mentionsCluster && lower.contains("extension")) {
            return "CLUSTER_STEM_MISSING_EXTENSION";
        }
        if (mentionsCluster) {
            return "CLUSTER_EXPRESSION_INVALID";
        }
        // Parentheses below mirror the original && / || precedence exactly:
        // icd OR terminology OR (code AND "not valid").
        boolean terminologyIssue = lower.contains("icd")
                || lower.contains("terminology")
                || (lower.contains("code") && lower.contains("not valid"));
        if (terminologyIssue) {
            if (lower.contains("class") || lower.contains("device")
                    || lower.contains("substance")) {
                return "TERMINOLOGY_INVALID_CLASS";
            }
            return "TERMINOLOGY_INVALID_CODE";
        }
    }
    return "PROFILE_VIOLATION";
}
// =========================================================================
// OperationOutcome parsing helpers
// =========================================================================
/** The exception's OperationOutcome as an R4 type, or null if absent/other model. */
private OperationOutcome extractOperationOutcome(BaseServerResponseException e) {
    return e.getOperationOutcome() instanceof OperationOutcome oo ? oo : null;
}
/**
 * Diagnostics of all ERROR/FATAL issues; falls back to the exception
 * message (or "Unknown error") when no OperationOutcome is attached.
 */
private List<String> extractIssueMessages(BaseServerResponseException e) {
    OperationOutcome outcome = extractOperationOutcome(e);
    if (outcome == null) {
        String msg = e.getMessage();
        return List.of(msg != null ? msg : "Unknown error");
    }
    return outcome.getIssue().stream()
            .filter(issue -> issue.getSeverity() == OperationOutcome.IssueSeverity.ERROR
                    || issue.getSeverity() == OperationOutcome.IssueSeverity.FATAL)
            .map(issue -> issue.getDiagnostics() != null
                    ? issue.getDiagnostics()
                    : issue.getCode().toCode())
            .toList();
}
/** First ERROR/FATAL diagnostic, or the raw exception message when none. */
private String extractFirstMessage(UnprocessableEntityException e) {
    List<String> messages = extractIssueMessages(e);
    return messages.isEmpty() ? e.getMessage() : messages.get(0);
}
/** FHIRPath of the first issue that carries an expression, or null. */
private String extractFirstElementPath(BaseServerResponseException e) {
    OperationOutcome outcome = extractOperationOutcome(e);
    if (outcome == null) {
        return null;
    }
    for (var issue : outcome.getIssue()) {
        if (!issue.getExpression().isEmpty()) {
            return issue.getExpression().get(0).getValue();
        }
    }
    return null;
}
/** First diagnostic mentioning a BD IG profile URL, or null (heuristic). */
private String extractViolatedProfile(BaseServerResponseException e) {
    OperationOutcome outcome = extractOperationOutcome(e);
    if (outcome == null) {
        return null;
    }
    return outcome.getIssue().stream()
            .map(OperationOutcome.OperationOutcomeIssueComponent::getDiagnostics)
            .filter(d -> d != null && d.contains("https://fhir.dghs.gov.bd"))
            .findFirst().orElse(null);
}
/**
 * Heuristically pulls the offending code value out of issue diagnostics of
 * the form {@code "... code=XYZ ..."}. Returns null when no diagnostic
 * carries a {@code code=} marker.
 *
 * <p>Fix: the boundary test is now {@code end >= 0} instead of
 * {@code end > 0} — when the value is empty ({@code "code= ..."},
 * i.e. a space immediately after the marker) the original returned the
 * entire remainder of the diagnostic; this version returns "".
 * The dead {@code idx < 0} guard (impossible after the filter) is removed.
 */
private String extractInvalidCode(BaseServerResponseException e) {
    OperationOutcome oo = extractOperationOutcome(e);
    if (oo == null) return null;
    return oo.getIssue().stream()
            .map(OperationOutcome.OperationOutcomeIssueComponent::getDiagnostics)
            .filter(d -> d != null && d.contains("code="))
            .map(d -> {
                String rest = d.substring(d.indexOf("code=") + 5);
                int end = rest.indexOf(' ');
                // end == -1: value runs to end of string; end == 0: empty value.
                return end >= 0 ? rest.substring(0, end) : rest;
            })
            .findFirst().orElse(null);
}
/**
 * Heuristically pulls the offending code-system URL out of issue
 * diagnostics of the form {@code "... system=URL ..."}. Returns null when
 * no diagnostic carries a {@code system=} marker.
 *
 * <p>Fix: boundary test is now {@code end >= 0} (was {@code end > 0}) so
 * an empty value ({@code "system= ..."}) yields "" instead of the whole
 * remainder of the diagnostic. Dead {@code idx < 0} guard removed —
 * the filter already guarantees the marker is present.
 */
private String extractInvalidSystem(BaseServerResponseException e) {
    OperationOutcome oo = extractOperationOutcome(e);
    if (oo == null) return null;
    return oo.getIssue().stream()
            .map(OperationOutcome.OperationOutcomeIssueComponent::getDiagnostics)
            .filter(d -> d != null && d.contains("system="))
            .map(d -> {
                String rest = d.substring(d.indexOf("system=") + 7);
                int end = rest.indexOf(' ');
                // end == -1: value runs to end of string; end == 0: empty value.
                return end >= 0 ? rest.substring(0, end) : rest;
            })
            .findFirst().orElse(null);
}
// =========================================================================
// Serialisation
// =========================================================================
/**
 * JSON-encodes the resource using the request's FhirContext. On any parser
 * failure a warning is logged and a sentinel JSON object is returned so
 * the audit row is still written.
 */
private String serializeResource(RequestDetails requestDetails, IBaseResource resource) {
    try {
        var parser = requestDetails.getFhirContext().newJsonParser();
        return parser.encodeResourceToString(resource);
    } catch (Exception e) {
        log.warn("Could not serialise resource for rejected submission storage: {}",
                e.getMessage());
        return "{\"error\": \"serialisation_failed\"}";
    }
}
/**
 * JSON-encodes an R4 resource via the shared cached R4 FhirContext; same
 * sentinel-on-failure behaviour as {@link #serializeResource}.
 */
private String serializeR4Resource(Resource resource) {
    try {
        var parser = ca.uhn.fhir.context.FhirContext.forR4Cached().newJsonParser();
        return parser.encodeResourceToString(resource);
    } catch (Exception e) {
        log.warn("Could not serialise R4 resource: {}", e.getMessage());
        return "{\"error\": \"serialisation_failed\"}";
    }
}
// =========================================================================
// Operation classification helpers
// =========================================================================
/** True for operations that carry/modify a resource: CREATE, UPDATE, PATCH, DELETE. */
private boolean isWriteOperation(RequestDetails requestDetails) {
    RestOperationTypeEnum op = requestDetails.getRestOperationType();
    return op == RestOperationTypeEnum.CREATE
            || op == RestOperationTypeEnum.UPDATE
            || op == RestOperationTypeEnum.PATCH
            || op == RestOperationTypeEnum.DELETE;
}
/**
 * Audit operation label. All read-style operations collapse to "READ";
 * every other operation maps to its enum name — identical output to the
 * original per-case mapping (CREATE → "CREATE", PATCH → "PATCH", …).
 */
private String deriveOperation(RequestDetails requestDetails) {
    RestOperationTypeEnum op = requestDetails.getRestOperationType();
    if (op == null) {
        return "UNKNOWN";
    }
    return switch (op) {
        case READ, VREAD, SEARCH_TYPE, SEARCH_SYSTEM -> "READ";
        default -> op.name();
    };
}
/** True when KeycloakJwtInterceptor marked this request as auth-exempt. */
private boolean isExemptedRequest(RequestDetails requestDetails) {
    HttpServletRequest req = (HttpServletRequest) requestDetails.getServletRequest();
    return req != null && Boolean.TRUE.equals(
            req.getAttribute(KeycloakJwtInterceptor.REQUEST_ATTR_AUTH_EXEMPTED));
}
// =========================================================================
// Inner classes
// =========================================================================
/**
 * Immutable audit context extracted from request attributes set by
 * KeycloakJwtInterceptor; every component falls back to "unknown" when
 * the attribute is absent (exempted or unauthenticated requests).
 *
 * <p>NOTE(review): elsewhere in this class the components are accessed as
 * fields ({@code ctx.clientId}) — a record exposes accessor methods
 * ({@code clientId()}), not public fields; verify this compiles as-is.
 */
private record AuditContext(
String clientId,
String facility,
String subject,
String requestId,
String requestIp) {}
}

View File

@@ -0,0 +1,644 @@
package bd.gov.dghs.fhir.interceptor;
import ca.uhn.fhir.interceptor.api.Hook;
import ca.uhn.fhir.interceptor.api.Interceptor;
import ca.uhn.fhir.interceptor.api.Pointcut;
import ca.uhn.fhir.rest.api.server.RequestDetails;
import ca.uhn.fhir.rest.server.exceptions.AuthenticationException;
import ca.uhn.fhir.rest.server.exceptions.ForbiddenOperationException;
import com.nimbusds.jose.JOSEException;
import com.nimbusds.jose.JWSAlgorithm;
import com.nimbusds.jose.jwk.source.DefaultJWKSetCache;
import com.nimbusds.jose.jwk.source.JWKSource;
import com.nimbusds.jose.jwk.source.RemoteJWKSet;
import com.nimbusds.jose.proc.BadJOSEException;
import com.nimbusds.jose.proc.JWSKeySelector;
import com.nimbusds.jose.proc.JWSVerificationKeySelector;
import com.nimbusds.jose.proc.SecurityContext;
import com.nimbusds.jwt.JWTClaimsSet;
import com.nimbusds.jwt.proc.ConfigurableJWTProcessor;
import com.nimbusds.jwt.proc.DefaultJWTClaimsVerifier;
import com.nimbusds.jwt.proc.DefaultJWTProcessor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import jakarta.annotation.PostConstruct;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import java.net.MalformedURLException;
import java.net.URL;
import java.text.ParseException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
/**
* HAPI FHIR interceptor that enforces Keycloak JWT authentication and
* role-based authorisation for all FHIR server requests.
*
* <h2>Token validation sequence</h2>
* <p>For every inbound request (except exempted paths):
* <ol>
* <li>Extract Bearer token from {@code Authorization} header.</li>
* <li>Verify JWT signature against Keycloak JWKS endpoint
* (cached per {@code kid}, TTL 1 hour).</li>
* <li>Verify token is not expired ({@code exp} claim).</li>
* <li>Verify token issuer matches {@code https://auth.dghs.gov.bd/realms/hris}.</li>
* <li>Verify token contains {@code mci-api} role in realm roles
* OR resource access roles.</li>
* <li>Extract {@code sending_facility} and {@code client_id} claims.</li>
* <li>Populate MDC context for structured log correlation.</li>
* <li>Set request attributes for downstream use by
* {@link bd.gov.dghs.fhir.terminology.TerminologyCacheManager} and
* {@link bd.gov.dghs.fhir.audit.AuditEventInterceptor}.</li>
* </ol>
*
* <h2>Exempted paths</h2>
* <ul>
* <li>{@code GET /fhir/metadata} — CapabilityStatement, unauthenticated access allowed.</li>
* <li>{@code GET /actuator/health/**} — load balancer health probes.</li>
* <li>{@code GET /actuator/info} — build info, non-sensitive.</li>
* </ul>
* All other paths require a valid Bearer token with {@code mci-api} role.
* Admin paths ({@code /admin/**}) require {@code fhir-admin} role in addition
* to a valid token — enforced in the respective controller.
*
* <h2>JWKS caching</h2>
* <p>Keycloak signing keys are cached using Nimbus {@link RemoteJWKSet} with
* a {@link DefaultJWKSetCache} (TTL: 1 hour, refresh-ahead: 15 minutes before expiry).
* When a JWT arrives with a {@code kid} not present in the local cache, the cache
* is immediately refreshed from the JWKS endpoint regardless of TTL — this handles
* Keycloak key rotation without a 1-hour lag.
*
* <h2>Role extraction</h2>
* <p>Keycloak embeds roles in two locations within the JWT:
* <pre>
* {
* "realm_access": { "roles": ["mci-api", "offline_access"] },
* "resource_access": {
* "fhir-vendor-org123": { "roles": ["mci-api"] }
* }
* }
* </pre>
* <p>This interceptor checks both locations. The {@code mci-api} role is
* sufficient in either location — the check is OR, not AND.
*
* <h2>Sending facility extraction</h2>
* <p>The sending facility identifier is extracted from a custom Keycloak
* token claim. Keycloak is configured (see ops/keycloak-setup.md) to add
* the facility ID as a claim named {@code sending_facility} via a user
* attribute mapper on the {@code fhir-vendor-{org-id}} client.
* If the claim is absent, the {@code client_id} is used as a fallback
* facility identifier.
*
* <h2>Error responses</h2>
* <ul>
* <li>Missing token: {@code 401 Unauthorized} — "No Bearer token provided"</li>
* <li>Invalid signature: {@code 401 Unauthorized} — "Token signature invalid"</li>
* <li>Expired token: {@code 401 Unauthorized} — "Token has expired"</li>
* <li>Wrong issuer: {@code 401 Unauthorized} — "Token issuer invalid"</li>
* <li>Missing mci-api role: {@code 401 Unauthorized} — "Required role not present"</li>
* </ul>
* All 401 responses include a {@code WWW-Authenticate} header per RFC 6750.
* Error details are intentionally minimal in the response body — full details
* are written to the structured audit log to avoid leaking token content to callers.
*/
@Interceptor
@Component
public class KeycloakJwtInterceptor {
private static final Logger log = LoggerFactory.getLogger(KeycloakJwtInterceptor.class);
// =========================================================================
// Request attribute keys — set on HttpServletRequest for downstream use
// by TerminologyCacheManager, AuditEventInterceptor, and ClusterExpressionValidator
// =========================================================================
/** Boolean — true if token has fhir-admin role */
public static final String REQUEST_ATTR_IS_ADMIN = "BD_FHIR_IS_ADMIN";
/** String — Keycloak client_id claim */
public static final String REQUEST_ATTR_CLIENT_ID = "BD_FHIR_CLIENT_ID";
/** String — sending_facility custom claim (falls back to client_id) */
public static final String REQUEST_ATTR_FACILITY = "BD_FHIR_FACILITY";
/** String — JWT sub claim (service account user ID) */
public static final String REQUEST_ATTR_SUBJECT = "BD_FHIR_SUBJECT";
/** String — per-request UUID for log correlation */
public static final String REQUEST_ATTR_REQUEST_ID = "BD_FHIR_REQUEST_ID";
/** JWTClaimsSet — full parsed claims, available for audit use */
public static final String REQUEST_ATTR_CLAIMS = "BD_FHIR_JWT_CLAIMS";
/** Boolean — true if token validation was bypassed (exempted path) */
public static final String REQUEST_ATTR_AUTH_EXEMPTED = "BD_FHIR_AUTH_EXEMPTED";
// MDC keys — populated for structured log correlation
private static final String MDC_REQUEST_ID = "requestId";
private static final String MDC_CLIENT_ID = "clientId";
private static final String MDC_FACILITY = "sendingFacility";
private static final String MDC_REQUEST_IP = "requestIp";
// Keycloak claim names (JWT payload keys)
private static final String CLAIM_REALM_ACCESS = "realm_access";
private static final String CLAIM_RESOURCE_ACCESS = "resource_access";
private static final String CLAIM_ROLES = "roles";
private static final String CLAIM_SENDING_FACILITY = "sending_facility";
private static final String CLAIM_CLIENT_ID = "azp"; // Keycloak: authorised party
// Paths that do not require authentication.
// NOTE(review): the "/actuator/health/" prefix below already covers the two
// exact health sub-paths in the exact set — harmless overlap.
private static final Set<String> EXEMPT_EXACT_PATHS = new HashSet<>(Arrays.asList(
"/fhir/metadata",
"/actuator/health",
"/actuator/health/liveness",
"/actuator/health/readiness",
"/actuator/info"
));
private static final Set<String> EXEMPT_PREFIX_PATHS = new HashSet<>(Arrays.asList(
"/actuator/health/"
));
// Externalised Keycloak settings (application.yaml: bd.fhir.keycloak.*)
@Value("${bd.fhir.keycloak.issuer}")
private String expectedIssuer;
@Value("${bd.fhir.keycloak.jwks-url}")
private String jwksUrl;
@Value("${bd.fhir.keycloak.required-role}")
private String requiredRole;
@Value("${bd.fhir.keycloak.admin-role}")
private String adminRole;
@Value("${bd.fhir.keycloak.jwks-cache-ttl-seconds}")
private long jwksCacheTtlSeconds;
// Nimbus JWT processor — thread-safe, reused across all requests
private ConfigurableJWTProcessor<SecurityContext> jwtProcessor;
@PostConstruct
public void initialise() throws MalformedURLException {
    // JWKS cache: key sets live for jwksCacheTtlSeconds; a proactive
    // refresh kicks in once 75% of the TTL has elapsed, so a fresh key
    // set is in place before the old one expires.
    // NOTE(review): DefaultJWKSetCache is deprecated in newer Nimbus
    // releases in favour of JWKSourceBuilder — confirm library version
    // before upgrading.
    DefaultJWKSetCache cache = new DefaultJWKSetCache(
            jwksCacheTtlSeconds,
            (long) (jwksCacheTtlSeconds * 0.75), // refresh at 75% of TTL
            TimeUnit.SECONDS);
    // RemoteJWKSet re-fetches the JWKS URL when a JWT carries an unknown
    // kid, regardless of TTL — covers Keycloak key rotation within a
    // single round-trip. Second argument null = default ResourceRetriever.
    JWKSource<SecurityContext> keySource =
            new RemoteJWKSet<>(new URL(jwksUrl), null, cache);
    // RS256 only — Keycloak's default asymmetric signing algorithm. Add
    // RS384/RS512/PS256 here if the realm is reconfigured. Never accept
    // HS256: the symmetric secret would have to be shared with every verifier.
    JWSKeySelector<SecurityContext> signatureKeySelector =
            new JWSVerificationKeySelector<>(JWSAlgorithm.RS256, keySource);
    // Claims verification: issuer must exactly equal expectedIssuer, and
    // sub/exp/iat/azp must all be present; exp (and nbf, if present) are
    // checked automatically by the verifier.
    DefaultJWTClaimsVerifier<SecurityContext> claimsVerifier =
            new DefaultJWTClaimsVerifier<>(
                    new JWTClaimsSet.Builder().issuer(expectedIssuer).build(),
                    new HashSet<>(Arrays.asList("sub", "exp", "iat", CLAIM_CLIENT_ID)));
    DefaultJWTProcessor<SecurityContext> processor = new DefaultJWTProcessor<>();
    processor.setJWSKeySelector(signatureKeySelector);
    processor.setJWTClaimsSetVerifier(claimsVerifier);
    jwtProcessor = processor;
    log.info("KeycloakJwtInterceptor initialised: issuer={} jwksUrl={} " +
            "requiredRole={} adminRole={} jwksCacheTtlSeconds={}",
            expectedIssuer, jwksUrl, requiredRole, adminRole, jwksCacheTtlSeconds);
}
// =========================================================================
// HAPI interceptor hook — fires before every request is processed
// =========================================================================
/**
 * Pre-request hook — validates JWT before HAPI processes the request.
 *
 * <p>Runs at {@link Pointcut#SERVER_INCOMING_REQUEST_PRE_HANDLED} —
 * after HAPI has parsed the request URL and method but before any
 * resource reading, validation, or persistence occurs.
 *
 * <p>On authentication failure: throws {@link AuthenticationException}
 * (HTTP 401) or {@link ForbiddenOperationException} (HTTP 403).
 * HAPI catches these and returns the appropriate HTTP response with
 * a FHIR OperationOutcome.
 *
 * <p>On success: sets request attributes and MDC context, then returns
 * normally — HAPI continues processing the request.
 *
 * @return {@code true} to continue processing; exception thrown on failure
 */
@Hook(Pointcut.SERVER_INCOMING_REQUEST_PRE_HANDLED)
public boolean validateRequest(
RequestDetails requestDetails,
HttpServletRequest servletRequest,
HttpServletResponse servletResponse) {
String requestId = UUID.randomUUID().toString();
String requestPath = servletRequest.getRequestURI();
String method = servletRequest.getMethod();
String clientIp = extractClientIp(servletRequest);
// Assign request ID immediately — available in logs even for rejected requests
servletRequest.setAttribute(REQUEST_ATTR_REQUEST_ID, requestId);
// Populate MDC with available context before auth (so rejected requests are logged)
MDC.put(MDC_REQUEST_ID, requestId);
MDC.put(MDC_REQUEST_IP, clientIp);
try {
// Check if this path is exempt from authentication
if (isExemptPath(requestPath, method)) {
servletRequest.setAttribute(REQUEST_ATTR_AUTH_EXEMPTED, Boolean.TRUE);
log.debug("Auth exempted: method={} path={} requestId={}",
method, requestPath, requestId);
return true;
}
// Extract Bearer token from the Authorization header (RFC 6750)
String authHeader = servletRequest.getHeader("Authorization");
if (authHeader == null || authHeader.isBlank()) {
log.info("Auth rejected — no Authorization header: " +
"method={} path={} ip={} requestId={}",
method, requestPath, clientIp, requestId);
throw unauthorised("No Bearer token provided",
servletResponse, requestId);
}
if (!authHeader.startsWith("Bearer ")) {
log.info("Auth rejected — Authorization header not Bearer: " +
"method={} path={} ip={} requestId={}",
method, requestPath, clientIp, requestId);
throw unauthorised("Authorization header must use Bearer scheme",
servletResponse, requestId);
}
String token = authHeader.substring(7).trim();
if (token.isBlank()) {
throw unauthorised("Bearer token is empty",
servletResponse, requestId);
}
// Validate JWT: signature, expiry, issuer. Catch order matters:
// BadJWTException extends BadJOSEException, so it must come first.
JWTClaimsSet claims;
try {
claims = jwtProcessor.process(token, null);
} catch (com.nimbusds.jwt.proc.BadJWTException e) {
// BadJWTException covers: expired, wrong issuer, missing required claims
String reason = classifyJwtException(e);
log.info("Auth rejected — JWT claims invalid: reason={} " +
"method={} path={} ip={} requestId={}",
reason, method, requestPath, clientIp, requestId);
throw unauthorised(reason, servletResponse, requestId);
} catch (BadJOSEException e) {
// BadJOSEException covers: invalid signature, no matching key
log.info("Auth rejected — JWT signature invalid: method={} " +
"path={} ip={} requestId={} detail={}",
method, requestPath, clientIp, requestId, e.getMessage());
throw unauthorised("Token signature invalid",
servletResponse, requestId);
} catch (JOSEException e) {
// JOSEException: key processing error, algorithm mismatch
log.warn("Auth error — JOSE processing failed: method={} path={} " +
"ip={} requestId={} error={}",
method, requestPath, clientIp, requestId, e.getMessage());
throw unauthorised("Token processing error",
servletResponse, requestId);
} catch (ParseException e) {
// Malformed JWT structure — not a valid JWT at all
log.info("Auth rejected — malformed JWT: method={} path={} " +
"ip={} requestId={}",
method, requestPath, clientIp, requestId);
throw unauthorised("Token is malformed",
servletResponse, requestId);
}
// Extract identity claims (azp, sub, sending_facility — falls back to azp)
String clientId = extractClientId(claims);
String subject = extractSubject(claims);
String facility = extractFacility(claims, clientId);
// Role validation — check mci-api in both realm_access and resource_access
// NOTE(review): a missing role yields 401, not 403, matching the class
// javadoc; ForbiddenOperationException (403) may be semantically more
// accurate — confirm the published API contract before changing.
boolean hasMciApiRole = hasRole(claims, requiredRole);
if (!hasMciApiRole) {
log.info("Auth rejected — missing required role '{}': " +
"clientId={} subject={} method={} path={} ip={} requestId={}",
requiredRole, clientId, subject, method,
requestPath, clientIp, requestId);
throw unauthorised("Required role '" + requiredRole + "' not present in token",
servletResponse, requestId);
}
// Check for admin role (does NOT fail if absent — stored as attribute)
boolean hasAdminRole = hasRole(claims, adminRole);
// Set request attributes for downstream components
servletRequest.setAttribute(REQUEST_ATTR_CLIENT_ID, clientId);
servletRequest.setAttribute(REQUEST_ATTR_FACILITY, facility);
servletRequest.setAttribute(REQUEST_ATTR_SUBJECT, subject);
servletRequest.setAttribute(REQUEST_ATTR_IS_ADMIN, hasAdminRole);
servletRequest.setAttribute(REQUEST_ATTR_CLAIMS, claims);
servletRequest.setAttribute(REQUEST_ATTR_AUTH_EXEMPTED, Boolean.FALSE);
// Populate MDC with full identity context for structured logging.
// All log statements after this point (in this thread) will include
// clientId, sendingFacility, and requestId automatically.
MDC.put(MDC_CLIENT_ID, clientId);
MDC.put(MDC_FACILITY, facility);
log.debug("Auth accepted: clientId={} facility={} subject={} " +
"isAdmin={} method={} path={} requestId={}",
clientId, facility, subject, hasAdminRole,
method, requestPath, requestId);
return true; // Proceed with request processing
} catch (AuthenticationException | ForbiddenOperationException e) {
// Re-throw HAPI exceptions — do not wrap them
throw e;
} catch (Exception e) {
// Unexpected error in auth interceptor — fail closed (reject, never allow)
log.error("Unexpected error in JWT interceptor: method={} path={} " +
"ip={} requestId={} error={}",
method, requestPath, clientIp, requestId, e.getMessage(), e);
throw unauthorised("Authentication processing error",
servletResponse, requestId);
}
}
/**
 * Post-request hook — clears the MDC context once a request has
 * completed successfully.
 *
 * <p>MDC storage is thread-local and servlet containers reuse pooled
 * threads. Without this cleanup, a reused thread would carry the
 * previous request's clientId/facility/requestId into the next
 * request's log lines, mis-attributing entries for request N to the
 * client/facility of request N-1.
 *
 * <p>This is the most common MDC bug in HAPI overlays and the hardest
 * to reproduce in testing, since it only appears under concurrent load.
 */
@Hook(Pointcut.SERVER_PROCESSING_COMPLETED_NORMALLY)
public void clearMdcOnSuccess(RequestDetails requestDetails) {
    for (String mdcKey : new String[] {
            MDC_REQUEST_ID, MDC_CLIENT_ID, MDC_FACILITY, MDC_REQUEST_IP}) {
        MDC.remove(mdcKey);
    }
}
/**
 * Post-request hook for failed requests — clears MDC even when the
 * request ends in an exception.
 *
 * <p>Both cleanup hooks are required: {@code SERVER_PROCESSING_COMPLETED_NORMALLY}
 * does not fire on exceptions, so without this hook the MDC context
 * would leak on any failed request (including 422 validation failures).
 */
@Hook(Pointcut.SERVER_PROCESSING_COMPLETED)
public void clearMdcAlways(RequestDetails requestDetails) {
    for (String mdcKey : new String[] {
            MDC_REQUEST_ID, MDC_CLIENT_ID, MDC_FACILITY, MDC_REQUEST_IP}) {
        MDC.remove(mdcKey);
    }
}
// =========================================================================
// Role extraction — Keycloak JWT structure
// =========================================================================
/**
 * Determines whether the given JWT grants the specified role.
 *
 * <p>Keycloak emits roles in two locations:
 * <ul>
 *   <li>{@code realm_access.roles[]} — realm-level roles</li>
 *   <li>{@code resource_access.{client-id}.roles[]} — client-level roles</li>
 * </ul>
 * The role counts as present if it appears in either location.
 */
@SuppressWarnings("unchecked")
private boolean hasRole(JWTClaimsSet claims, String role) {
    try {
        // Realm-level roles first
        Map<String, Object> realmAccess =
                (Map<String, Object>) claims.getClaim(CLAIM_REALM_ACCESS);
        if (rolesContain(realmAccess, role)) {
            return true;
        }
        // Then every client entry under resource_access
        Map<String, Object> resourceAccess =
                (Map<String, Object>) claims.getClaim(CLAIM_RESOURCE_ACCESS);
        if (resourceAccess == null) {
            return false;
        }
        for (Object clientEntry : resourceAccess.values()) {
            if (rolesContain((Map<String, Object>) clientEntry, role)) {
                return true;
            }
        }
        return false;
    } catch (ClassCastException e) {
        // Malformed role claims — treat as role absent
        log.warn("Malformed role claims in JWT: {}", e.getMessage());
        return false;
    }
}

/**
 * Returns true when the given access map's {@code roles} list contains
 * the role. A {@code ClassCastException} from a malformed roles list
 * propagates to the caller's catch block (treated as role absent).
 */
@SuppressWarnings("unchecked")
private boolean rolesContain(Map<String, Object> accessMap, String role) {
    if (accessMap == null) {
        return false;
    }
    List<String> roles = (List<String>) accessMap.get(CLAIM_ROLES);
    return roles != null && roles.contains(role);
}
// =========================================================================
// Claim extraction helpers
// =========================================================================
/**
 * Reads the client identifier claim from the token.
 * Falls back to {@code "unknown"} when the claim is absent or not
 * parseable as a string.
 */
private String extractClientId(JWTClaimsSet claims) {
    String clientId;
    try {
        clientId = claims.getStringClaim(CLAIM_CLIENT_ID);
    } catch (ParseException e) {
        clientId = null;
    }
    return clientId == null ? "unknown" : clientId;
}
/** Returns the JWT subject ({@code sub} claim), or {@code "unknown"} if absent. */
private String extractSubject(JWTClaimsSet claims) {
    String subject = claims.getSubject();
    if (subject == null) {
        return "unknown";
    }
    return subject;
}
/**
 * Extracts the sending facility identifier from the JWT.
 *
 * <p>The {@code sending_facility} claim is a custom Keycloak mapper
 * configured on each vendor client (see ops/keycloak-setup.md) and
 * carries the DGHS facility code of the submitting organisation.
 *
 * <p>When the claim is absent or blank (e.g., during initial rollout
 * before all clients are configured), the {@code client_id} is used
 * instead. That fallback is logged at WARN level so the ops team can
 * identify unconfigured clients. An unparseable claim also falls back
 * to {@code client_id}, silently — matching a missing claim's effect.
 */
private String extractFacility(JWTClaimsSet claims, String clientId) {
    String facility;
    try {
        facility = claims.getStringClaim(CLAIM_SENDING_FACILITY);
    } catch (ParseException e) {
        // Claim exists but is not a string — treat like an absent claim
        return clientId;
    }
    if (facility == null || facility.isBlank()) {
        // Fallback — log so unconfigured clients are visible
        log.warn("sending_facility claim absent in token for clientId={}. " +
                 "Using client_id as facility identifier. " +
                 "Configure a sending_facility user attribute mapper on this " +
                 "Keycloak client — see ops/keycloak-setup.md.",
                 clientId);
        return clientId;
    }
    return facility;
}
// =========================================================================
// Path exemption
// =========================================================================
/**
 * Decides whether a request path bypasses JWT authentication entirely.
 *
 * <p>Exempt are: exact matches in {@code EXEMPT_EXACT_PATHS}, anything
 * under the {@code EXEMPT_PREFIX_PATHS} prefixes (covers
 * /actuator/health/* sub-paths), and the Prometheus/metrics endpoints.
 *
 * <p>NOTE(review): the {@code method} parameter is currently unused —
 * exemption is decided on path alone; confirm whether method-scoped
 * exemptions were intended.
 */
private boolean isExemptPath(String requestPath, String method) {
    // Exact path match
    if (EXEMPT_EXACT_PATHS.contains(requestPath)) {
        return true;
    }
    // Prometheus metrics endpoints — exempted entirely at pilot phase.
    // At national rollout, restrict to the internal monitoring network
    // via nginx rather than in application code.
    if ("/actuator/prometheus".equals(requestPath)
            || "/actuator/metrics".equals(requestPath)) {
        return true;
    }
    // Prefix match for /actuator/health/* sub-paths
    for (String exemptPrefix : EXEMPT_PREFIX_PATHS) {
        if (requestPath.startsWith(exemptPrefix)) {
            return true;
        }
    }
    return false;
}
// =========================================================================
// Error handling
// =========================================================================
/**
 * Builds and throws an {@link AuthenticationException} (HTTP 401).
 *
 * <p>Sets the {@code WWW-Authenticate} header per RFC 6750 §3.1. The
 * realm identifies the BD FHIR server. The error description in the
 * HTTP response is intentionally generic — full details live only in
 * the audit log.
 *
 * <p>The requestId is echoed to the caller so vendors can correlate a
 * rejected submission with its audit log entry; it is the only
 * DGHS-internal identifier vendors receive.
 *
 * @param internalReason audit-log-only explanation (never sent to the client)
 * @param response       servlet response on which the challenge header is set
 * @param requestId      correlation id included in the client-facing message
 */
private AuthenticationException unauthorised(
        String internalReason,
        HttpServletResponse response,
        String requestId) {
    // RFC 6750 challenge — deliberately generic error_description
    String challenge = "Bearer realm=\"BD FHIR National Repository\", "
            + "error=\"invalid_token\", "
            + "error_description=\"Token validation failed\"";
    response.setHeader("WWW-Authenticate", challenge);
    // Minimal information in the exception message: HAPI converts it to
    // an OperationOutcome. internalReason is NOT included — it could
    // reveal token structure information to an attacker.
    String clientMessage = "Authentication failed. RequestId: " + requestId
            + " — present this ID to DGHS support for investigation.";
    return new AuthenticationException(clientMessage);
}
/**
 * Classifies a Nimbus {@link com.nimbusds.jwt.proc.BadJWTException}
 * into a human-readable reason for internal logging.
 *
 * <p>These reasons are written to the audit log only — never to the
 * HTTP response. Classification is by substring match on the
 * exception message, checked in priority order.
 */
private String classifyJwtException(com.nimbusds.jwt.proc.BadJWTException e) {
    String message = e.getMessage();
    if (message == null) {
        return "JWT validation failed";
    }
    String normalised = message.toLowerCase();
    if (normalised.contains("expired")) {
        return "Token has expired";
    }
    if (normalised.contains("issuer")) {
        return "Token issuer mismatch: expected " + expectedIssuer;
    }
    if (normalised.contains("not before")) {
        return "Token not yet valid (nbf claim)";
    }
    if (normalised.contains("missing") || normalised.contains("required")) {
        return "Token missing required claim";
    }
    // Do not include the raw message — it may contain token fragments
    return "Token claims validation failed";
}
// =========================================================================
// Client IP extraction
// =========================================================================
/**
 * Extracts the real client IP from the request.
 *
 * <p>nginx is configured to set {@code X-Forwarded-For} with the real
 * client IP. Only the first (leftmost) entry is trusted — nginx
 * overwrites the header rather than appending, so clients cannot spoof
 * it through this deployment's proxy.
 *
 * <p>Falls back to {@code RemoteAddr} when the header is absent (direct
 * connection, possible in development) or when its first element is
 * blank/degenerate. FIX: the previous implementation returned the raw
 * header verbatim for a header starting with a comma (e.g.
 * {@code ",1.2.3.4"}), producing a malformed IP in logs; it now falls
 * back to {@code RemoteAddr} in that case.
 */
private String extractClientIp(HttpServletRequest request) {
    String xff = request.getHeader("X-Forwarded-For");
    if (xff != null && !xff.isBlank()) {
        // If comma-separated, take the first (leftmost = original client)
        int commaIdx = xff.indexOf(',');
        String firstHop = (commaIdx >= 0 ? xff.substring(0, commaIdx) : xff).trim();
        if (!firstHop.isEmpty()) {
            return firstHop;
        }
        // Degenerate header (leading comma / whitespace) — fall through
    }
    return request.getRemoteAddr();
}
}

View File

@@ -0,0 +1,609 @@
package bd.gov.dghs.fhir.terminology;
import ca.uhn.fhir.context.FhirContext;
import ca.uhn.fhir.context.support.ConceptValidationOptions;
import ca.uhn.fhir.context.support.IValidationSupport;
import ca.uhn.fhir.context.support.ValidationSupportContext;
import ca.uhn.fhir.context.support.ValueSetExpansionOptions;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hc.client5.http.classic.methods.HttpGet;
import org.apache.hc.client5.http.config.RequestConfig;
import org.apache.hc.client5.http.impl.classic.CloseableHttpClient;
import org.apache.hc.client5.http.impl.classic.HttpClients;
import org.apache.hc.core5.http.ClassicHttpResponse;
import org.apache.hc.core5.util.Timeout;
import org.hl7.fhir.instance.model.api.IBaseResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import java.io.IOException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
/**
* Custom HAPI validation support that integrates OCL as the national
* terminology authority for ICD-11 code validation.
*
* <h2>Design</h2>
* <p>This class extends HAPI's {@link IValidationSupport} interface and sits
* last in the {@link ca.uhn.fhir.context.support.ValidationSupportChain}.
* It intercepts terminology validation calls for the ICD-11 MMS system
* ({@code http://id.who.int/icd/release/11/mms}) and routes them to OCL
* {@code $validate-code}.
*
* <h2>$expand suppression</h2>
* <p>OCL returns an empty response for {@code $expand}. HAPI's default
* {@link ca.uhn.fhir.jpa.term.api.ITermReadSvc} treats an empty expansion
* as a validation failure, causing all ICD-11 coded resources to be rejected
* regardless of code validity. This class overrides
* {@link #expandValueSet(ValidationSupportContext, ValueSetExpansionOptions, IBaseResource)}
* to return {@code null} (not-supported) for ICD-11 ValueSets, which instructs
* the chain to skip expansion and proceed directly to {@code $validate-code}.
*
* <h2>Caching</h2>
* <p>Validated codes are cached in a {@link ConcurrentHashMap} with a
* configurable TTL (default 24 hours). Cache entries store the validation result
* (valid/invalid) and the timestamp of validation. On cache hit, OCL is never called.
*
* <p>Cache key format: {@code system|version|code}
* where version is the OCL collection version string or empty string if absent.
*
* <h2>Cache invalidation</h2>
* <p>The cache is flushed by {@link TerminologyCacheManager} when the ICD-11
* version upgrade pipeline calls the admin flush endpoint. After flush, the next
* validation call for each code hits OCL again and repopulates the cache.
*
* <h2>Error handling</h2>
* <p>OCL connectivity failures are handled as follows:
* <ul>
* <li>Timeout (>10s): return {@code null} (not-supported) — HAPI falls through
* to next support in chain. If no other support handles it, the code is
* treated as valid (fail-open for OCL outages).</li>
* <li>HTTP 4xx: return invalid result — code was rejected by OCL.</li>
* <li>HTTP 5xx: return {@code null} (not-supported) — OCL server error,
* fail-open to prevent full service outage during OCL maintenance.</li>
* <li>{@code $expand} failure: log and return {@code null} — never reject.</li>
* </ul>
*
* <p>Fail-open for OCL outages is a deliberate policy decision. The alternative
* (fail-closed) would reject all coded resource submissions during OCL downtime,
* which is operationally worse than allowing a small window of unvalidated codes.
* OCL outages must be tracked in the audit log (see AuditEventInterceptor).
*/
@Component
public class BdTerminologyValidationSupport implements IValidationSupport {
private static final Logger log = LoggerFactory.getLogger(BdTerminologyValidationSupport.class);
/** ICD-11 MMS system URI — must match BD Core IG profile declarations */
public static final String ICD11_SYSTEM = "http://id.who.int/icd/release/11/mms";
/** Cache: key = "system|version|code", value = CacheEntry */
private final Map<String, CacheEntry> validationCache = new ConcurrentHashMap<>();
/** Background thread for cache TTL eviction */
private final ScheduledExecutorService cacheEvictionExecutor =
Executors.newSingleThreadScheduledExecutor(r -> {
Thread t = new Thread(r, "terminology-cache-eviction");
t.setDaemon(true);
return t;
});
private final FhirContext fhirContext;
private final ObjectMapper objectMapper;
private CloseableHttpClient httpClient;
@Value("${bd.fhir.ocl.base-url}")
private String oclBaseUrl;
@Value("${bd.fhir.ocl.timeout-seconds}")
private int timeoutSeconds;
@Value("${bd.fhir.ocl.retry-attempts}")
private int retryAttempts;
@Value("${bd.fhir.terminology.cache-ttl-seconds}")
private long cacheTtlSeconds;
public BdTerminologyValidationSupport(FhirContext fhirContext) {
this.fhirContext = fhirContext;
this.objectMapper = new ObjectMapper();
}
@PostConstruct
public void initialise() {
// Build a dedicated HttpClient for OCL calls.
// Separate from HAPI's internal HttpClient to avoid
// interference with HAPI's own REST operations.
RequestConfig requestConfig = RequestConfig.custom()
.setConnectionRequestTimeout(Timeout.ofSeconds(timeoutSeconds))
.setResponseTimeout(Timeout.ofSeconds(timeoutSeconds))
.build();
this.httpClient = HttpClients.custom()
.setDefaultRequestConfig(requestConfig)
.setMaxConnTotal(20) // max total OCL connections
.setMaxConnPerRoute(20) // all connections go to one host
.evictExpiredConnections()
.evictIdleConnections(Timeout.ofMinutes(5))
.build();
// Schedule cache eviction every hour.
// Eviction removes entries older than cacheTtlSeconds.
// This prevents unbounded cache growth over long uptime periods.
cacheEvictionExecutor.scheduleAtFixedRate(
this::evictExpiredCacheEntries,
1, 1, TimeUnit.HOURS);
log.info("BdTerminologyValidationSupport initialised: oclBaseUrl={}, " +
"timeoutSeconds={}, cacheTtlSeconds={}", oclBaseUrl, timeoutSeconds, cacheTtlSeconds);
}
@PreDestroy
public void shutdown() {
cacheEvictionExecutor.shutdownNow();
try {
if (httpClient != null) {
httpClient.close();
}
} catch (IOException e) {
log.warn("Error closing OCL HttpClient: {}", e.getMessage());
}
}
// =========================================================================
// IValidationSupport interface
// =========================================================================
@Override
public FhirContext getFhirContext() {
return fhirContext;
}
/**
* Called by HAPI validation chain to validate a code against a ValueSet.
*
* <p>For ICD-11 codes: checks cache first, then calls OCL {@code $validate-code}.
* For non-ICD-11 codes: returns {@code null} (defer to other supports in chain).
*/
@Override
public CodeValidationResult validateCode(
ValidationSupportContext theValidationSupportContext,
ConceptValidationOptions theOptions,
String theCodeSystem,
String theCode,
String theDisplay,
String theValueSetUrl) {
if (!isIcd11System(theCodeSystem)) {
// Not our responsibility — defer to next support in chain
return null;
}
if (theCode == null || theCode.isBlank()) {
return invalid("Code is null or empty", theCodeSystem, theCode);
}
String cacheKey = buildCacheKey(theCodeSystem, null, theCode);
// Cache hit
CacheEntry cached = validationCache.get(cacheKey);
if (cached != null && !cached.isExpired(cacheTtlSeconds)) {
log.debug("Terminology cache hit: system={} code={} valid={}",
theCodeSystem, theCode, cached.valid);
return cached.valid
? valid(theCode, cached.display)
: invalid(cached.invalidReason, theCodeSystem, theCode);
}
// Cache miss — call OCL
return validateWithOcl(theCodeSystem, theCode, theDisplay, theValueSetUrl, cacheKey);
}
/**
* Called by HAPI validation chain to validate a code against a CodeSystem.
* Delegates to {@link #validateCode} for ICD-11 codes.
*/
@Override
public CodeValidationResult validateCodeInValueSet(
ValidationSupportContext theValidationSupportContext,
ConceptValidationOptions theOptions,
String theCodeSystem,
String theCode,
String theDisplay,
IBaseResource theValueSet) {
if (!isIcd11System(theCodeSystem)) {
return null;
}
return validateCode(theValidationSupportContext, theOptions,
theCodeSystem, theCode, theDisplay, null);
}
/**
* Suppresses $expand for ICD-11 ValueSets.
*
* <p>OCL returns an empty response for {@code $expand}. If this method
* returned a failed expansion result, HAPI would treat it as a validation
* failure and reject all ICD-11 coded resources. Instead, we return
* {@code null} (not supported), which instructs the chain to skip
* expansion for this ValueSet.
*
* <p>This override is the key to OCL integration correctness.
* Without it, HAPI calls {@code $expand}, gets an empty response,
* and rejects the resource regardless of whether the code is valid.
*/
@Override
public ValueSetExpansionOutcome expandValueSet(
ValidationSupportContext theValidationSupportContext,
ValueSetExpansionOptions theExpansionOptions,
IBaseResource theValueSetToExpand) {
// Check if this is an ICD-11 ValueSet before suppressing
if (theValueSetToExpand != null) {
String valueSetUrl = extractValueSetUrl(theValueSetToExpand);
if (valueSetUrl != null && valueSetUrl.contains("icd11")) {
log.debug("Suppressing $expand for ICD-11 ValueSet: {} " +
"(OCL does not support $expand — using $validate-code instead)",
valueSetUrl);
// Return null = not supported by this support.
// The chain will try the next support, which will also
// return null, and HAPI will fall through to $validate-code.
return null;
}
}
// Non-ICD-11 ValueSets: not our responsibility
return null;
}
/**
* Indicates whether this support can handle ValueSet expansion for ICD-11.
*
* <p>Returning {@code false} for ICD-11 ValueSets prevents HAPI from
* even attempting {@code $expand} via this support — it goes directly
* to {@code $validate-code}. This is the {@code isValueSetSupported()}
* override hook added in HAPI 7.2.0 specifically for this use case.
*/
@Override
public boolean isValueSetSupported(
ValidationSupportContext theValidationSupportContext,
String theValueSetUrl) {
if (theValueSetUrl != null && theValueSetUrl.contains("icd11")) {
log.debug("isValueSetSupported=false for ICD-11 ValueSet: {} " +
"(routing to $validate-code)", theValueSetUrl);
return false;
}
return false; // Let other supports in chain answer for non-ICD-11 ValueSets
}
/**
* Indicates whether this support can handle code system lookups for ICD-11.
*/
@Override
public boolean isCodeSystemSupported(
ValidationSupportContext theValidationSupportContext,
String theSystem) {
return isIcd11System(theSystem);
}
// =========================================================================
// OCL $validate-code call
// =========================================================================
private CodeValidationResult validateWithOcl(
String codeSystem,
String code,
String display,
String valueSetUrl,
String cacheKey) {
// Build OCL $validate-code URL.
// Use valueSetUrl if provided (checks class restriction via bd-condition-icd11 VS).
// Fall back to code system validation if no ValueSet provided.
String url = buildValidateCodeUrl(codeSystem, code, display, valueSetUrl);
log.debug("OCL $validate-code: url={}", url);
for (int attempt = 1; attempt <= retryAttempts; attempt++) {
try {
CodeValidationResult result = executeOclCall(url, codeSystem, code, cacheKey);
if (result != null) {
return result;
}
} catch (OclTimeoutException e) {
log.warn("OCL $validate-code timeout (attempt {}/{}): system={} code={} url={}",
attempt, retryAttempts, codeSystem, code, url);
if (attempt == retryAttempts) {
// After all retries exhausted: fail-open.
// Log as warn — the AuditEventInterceptor will record
// the OCL unavailability in the audit trail.
log.warn("OCL unavailable after {} attempts — accepting code " +
"without terminology validation (fail-open): system={} code={}",
retryAttempts, codeSystem, code);
return null; // null = not supported = defer = fail-open
}
// Brief wait before retry
try { Thread.sleep(500L * attempt); } catch (InterruptedException ie) {
Thread.currentThread().interrupt();
return null;
}
} catch (OclServerErrorException e) {
log.warn("OCL server error (attempt {}/{}): system={} code={} status={}",
attempt, retryAttempts, codeSystem, code, e.statusCode);
if (attempt == retryAttempts) {
log.warn("OCL server error after {} attempts — fail-open: system={} code={}",
retryAttempts, codeSystem, code);
return null; // fail-open on server errors
}
try { Thread.sleep(500L * attempt); } catch (InterruptedException ie) {
Thread.currentThread().interrupt();
return null;
}
} catch (Exception e) {
log.error("Unexpected error calling OCL: system={} code={} error={}",
codeSystem, code, e.getMessage(), e);
return null; // fail-open on unexpected errors
}
}
return null;
}
private CodeValidationResult executeOclCall(
String url, String codeSystem, String code, String cacheKey)
throws OclTimeoutException, OclServerErrorException, IOException {
HttpGet request = new HttpGet(url);
request.setHeader("Accept", "application/fhir+json");
return httpClient.execute(request, (ClassicHttpResponse response) -> {
int statusCode = response.getCode();
if (statusCode == 200) {
// Parse Parameters response from OCL $validate-code
byte[] body = response.getEntity().getContent().readAllBytes();
return parseValidateCodeResponse(body, codeSystem, code, cacheKey);
} else if (statusCode >= 400 && statusCode < 500) {
// 4xx: code rejected by OCL (not found, wrong system, wrong class)
String reason = "OCL rejected code: HTTP " + statusCode +
" for system=" + codeSystem + " code=" + code;
log.info("OCL $validate-code rejected: system={} code={} status={}",
codeSystem, code, statusCode);
// Cache the rejection — do not re-call OCL for the same invalid code
validationCache.put(cacheKey, CacheEntry.invalid(reason));
return invalid(reason, codeSystem, code);
} else if (statusCode >= 500) {
throw new OclServerErrorException(statusCode);
} else {
log.warn("Unexpected OCL status: {}", statusCode);
return null;
}
});
}
private CodeValidationResult parseValidateCodeResponse(
byte[] body, String codeSystem, String code, String cacheKey) {
try {
JsonNode root = objectMapper.readTree(body);
// OCL $validate-code response is a FHIR Parameters resource.
// Key parameter: "result" (boolean) — true if valid, false if invalid.
// Optional parameter: "display" — preferred display term.
// Optional parameter: "message" — reason for invalidity.
JsonNode parameter = root.path("parameter");
boolean result = false;
String display = null;
String message = null;
if (parameter.isArray()) {
for (JsonNode param : parameter) {
String name = param.path("name").asText();
switch (name) {
case "result" -> result = param.path("valueBoolean").asBoolean(false);
case "display" -> display = param.path("valueString").asText(null);
case "message" -> message = param.path("valueString").asText(null);
}
}
}
if (result) {
log.debug("OCL validated code: system={} code={} display={}",
codeSystem, code, display);
validationCache.put(cacheKey, CacheEntry.valid(display));
return valid(code, display);
} else {
String reason = message != null ? message :
"Code not valid in system: system=" + codeSystem + " code=" + code;
log.info("OCL rejected code: system={} code={} reason={}", codeSystem, code, reason);
validationCache.put(cacheKey, CacheEntry.invalid(reason));
return invalid(reason, codeSystem, code);
}
} catch (Exception e) {
log.error("Failed to parse OCL $validate-code response: system={} code={} error={}",
codeSystem, code, e.getMessage());
// Parse failure: fail-open — do not reject the resource
return null;
}
}
// =========================================================================
// Cache management (called by TerminologyCacheManager)
// =========================================================================
/**
* Flushes the entire terminology validation cache.
*
* <p>Called by {@link TerminologyCacheManager} when the ICD-11 version
* upgrade pipeline completes. After flush, the next validation call for
* each code hits OCL and repopulates the cache with the new version's results.
*
* <p>Thread-safe: {@link ConcurrentHashMap#clear()} is atomic.
*
* @return number of entries that were evicted
*/
public int flushCache() {
int size = validationCache.size();
validationCache.clear();
log.info("Terminology cache flushed: {} entries evicted", size);
return size;
}
/**
* Returns current cache statistics for the admin endpoint.
*/
public CacheStats getCacheStats() {
long now = Instant.now().getEpochSecond();
long expired = validationCache.values().stream()
.filter(e -> e.isExpired(cacheTtlSeconds))
.count();
return new CacheStats(validationCache.size(), expired, cacheTtlSeconds);
}
private void evictExpiredCacheEntries() {
int before = validationCache.size();
validationCache.entrySet().removeIf(e -> e.getValue().isExpired(cacheTtlSeconds));
int evicted = before - validationCache.size();
if (evicted > 0) {
log.debug("Terminology cache eviction: {} expired entries removed", evicted);
}
}
// =========================================================================
// URL builders
// =========================================================================
private String buildValidateCodeUrl(
String codeSystem, String code, String display, String valueSetUrl) {
StringBuilder sb = new StringBuilder(oclBaseUrl);
if (valueSetUrl != null && !valueSetUrl.isBlank()) {
// ValueSet-scoped validation — enforces class restriction
// (Diagnosis + Finding only for bd-condition-icd11-diagnosis-valueset)
sb.append("/ValueSet/$validate-code");
sb.append("?url=").append(encode(valueSetUrl));
sb.append("&system=").append(encode(codeSystem));
sb.append("&code=").append(encode(code));
} else {
// CodeSystem-scoped validation — validates existence only
sb.append("/CodeSystem/$validate-code");
sb.append("?system=").append(encode(codeSystem));
sb.append("&code=").append(encode(code));
}
if (display != null && !display.isBlank()) {
sb.append("&display=").append(encode(display));
}
return sb.toString();
}
// =========================================================================
// Helpers
// =========================================================================
private boolean isIcd11System(String system) {
return ICD11_SYSTEM.equals(system);
}
private String buildCacheKey(String system, String version, String code) {
return system + "|" + (version != null ? version : "") + "|" + code;
}
private String encode(String value) {
return URLEncoder.encode(value, StandardCharsets.UTF_8);
}
private String extractValueSetUrl(IBaseResource valueSet) {
try {
// Use FHIR R4 reflection to extract url from ValueSet resource
if (valueSet instanceof org.hl7.fhir.r4.model.ValueSet vs) {
return vs.getUrl();
}
} catch (Exception e) {
log.debug("Could not extract ValueSet URL: {}", e.getMessage());
}
return null;
}
private CodeValidationResult valid(String code, String display) {
return new CodeValidationResult()
.setCode(code)
.setDisplay(display)
.setSeverity(IssueSeverity.INFORMATION);
}
private CodeValidationResult invalid(String message, String system, String code) {
return new CodeValidationResult()
.setSeverity(IssueSeverity.ERROR)
.setMessage(message)
.setCode(code)
.setCodeSystemName(system);
}
// =========================================================================
// Inner classes
// =========================================================================
/** Cache entry holding validation result and creation timestamp. */
private static final class CacheEntry {
final boolean valid;
final String display; // non-null if valid
final String invalidReason; // non-null if invalid
final long createdEpochSeconds;
private CacheEntry(boolean valid, String display, String invalidReason) {
this.valid = valid;
this.display = display;
this.invalidReason = invalidReason;
this.createdEpochSeconds = Instant.now().getEpochSecond();
}
static CacheEntry valid(String display) {
return new CacheEntry(true, display, null);
}
static CacheEntry invalid(String reason) {
return new CacheEntry(false, null, reason);
}
boolean isExpired(long ttlSeconds) {
return (Instant.now().getEpochSecond() - createdEpochSeconds) > ttlSeconds;
}
}
/** Exception for OCL timeout scenarios */
private static class OclTimeoutException extends RuntimeException {
OclTimeoutException(String message) { super(message); }
}
/** Exception for OCL HTTP 5xx responses */
private static class OclServerErrorException extends RuntimeException {
final int statusCode;
OclServerErrorException(int statusCode) {
super("OCL server error: " + statusCode);
this.statusCode = statusCode;
}
}
/** Cache statistics for the admin endpoint */
public record CacheStats(int totalEntries, long expiredEntries, long ttlSeconds) {}
}

View File

@@ -0,0 +1,182 @@
package bd.gov.dghs.fhir.terminology;
import bd.gov.dghs.fhir.interceptor.KeycloakJwtInterceptor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.DeleteMapping;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import jakarta.servlet.http.HttpServletRequest;
import java.time.Instant;
import java.util.Map;
/**
* Admin REST controller exposing the terminology cache management endpoint.
*
* <h2>Endpoints</h2>
* <pre>
* DELETE /admin/terminology/cache
* Flushes the ICD-11 validation cache.
* Required role: fhir-admin (NOT mci-api)
* Called by: ICD-11 version upgrade pipeline after OCL import completes.
* See: ops/version-upgrade-integration.md
*
* GET /admin/terminology/cache/stats
* Returns current cache statistics.
* Required role: fhir-admin
* </pre>
*
* <h2>Security</h2>
* <p>Both endpoints require the {@code fhir-admin} Keycloak role.
* The {@link KeycloakJwtInterceptor} enforces authentication and the
* {@code mci-api} role for FHIR resource endpoints. For admin endpoints,
* this controller performs an additional role check for {@code fhir-admin}.
*
* <p>The cache flush endpoint is a denial-of-service vector if unauthenticated:
* an attacker repeatedly flushing the cache forces 50,000+ cold OCL
* {@code $validate-code} calls per flush cycle. The {@code fhir-admin}
* role requirement is the primary protection. Additionally, the endpoint
* is rate-limited at nginx (see nginx.conf — /admin/ location block).
*
* <h2>Upgrade pipeline integration</h2>
* <p>The version upgrade pipeline calls this endpoint after completing:
* <ol>
* <li>OCL ICD-11 import</li>
* <li>concept_class patch for Diagnosis + Finding concepts</li>
* <li>bd-condition-icd11-diagnosis-valueset repopulation</li>
* </ol>
* Order is critical: the cache must be flushed AFTER OCL has the new codes,
* not before. Flushing before OCL import completes means validation calls
* hit OCL with the old version and repopulate the cache with stale results,
* negating the purpose of the flush.
*
* @see BdTerminologyValidationSupport#flushCache()
* @see ops/version-upgrade-integration.md
*/
@RestController
@RequestMapping("/admin/terminology")
public class TerminologyCacheManager {

    private static final Logger log = LoggerFactory.getLogger(TerminologyCacheManager.class);

    /** Validation support component that owns the ICD-11 cache being managed. */
    private final BdTerminologyValidationSupport terminologySupport;

    public TerminologyCacheManager(BdTerminologyValidationSupport terminologySupport) {
        this.terminologySupport = terminologySupport;
    }

    /**
     * Flushes the ICD-11 terminology validation cache.
     *
     * <p>After this call returns 200, the next validation request for every
     * ICD-11 code will hit OCL directly. The cache will repopulate organically
     * as vendors submit resources. There is no pre-warming mechanism — the cache
     * is demand-driven.
     *
     * <p>This endpoint is idempotent: calling it multiple times has the same
     * effect as calling it once. The upgrade pipeline may call it safely on
     * retry without side effects.
     *
     * @param request servlet request carrying JWT claims stored as request
     *        attributes by {@link KeycloakJwtInterceptor}
     * @return 200 with a flush summary on success, or 403 if the caller does
     *         not have the {@code fhir-admin} role
     */
    @DeleteMapping(value = "/cache", produces = MediaType.APPLICATION_JSON_VALUE)
    public ResponseEntity<Map<String, Object>> flushCache(HttpServletRequest request) {
        // Enforce fhir-admin role — this is a separate check from the
        // KeycloakJwtInterceptor mci-api check. The interceptor allows any
        // authenticated mci-api request through to the FHIR endpoints.
        // Admin endpoints require a different, more privileged role.
        if (!hasAdminRole(request)) {
            log.warn("Cache flush rejected: caller lacks fhir-admin role. " +
                    "clientId={}", getClientId(request));
            return forbidden("Cache flush requires fhir-admin role");
        }
        String clientId = getClientId(request);
        String requestId = getRequestId(request);
        log.info("Terminology cache flush initiated: clientId={} requestId={}",
                clientId, requestId);
        int evicted = terminologySupport.flushCache();
        log.info("Terminology cache flush completed: evicted={} clientId={} requestId={}",
                evicted, clientId, requestId);
        return ResponseEntity.ok(Map.of(
                "status", "flushed",
                "entriesEvicted", evicted,
                "timestamp", Instant.now().toString(),
                "message", "Terminology cache flushed. Next validation requests " +
                        "will call OCL directly until cache repopulates.",
                "requestId", requestId != null ? requestId : "unknown"
        ));
    }

    /**
     * Returns current cache statistics.
     *
     * <p>Useful for the upgrade pipeline to verify cache state before and
     * after a flush, and for ops to understand cache hit rates.
     *
     * @param request servlet request carrying JWT claims stored as request
     *        attributes by {@link KeycloakJwtInterceptor}
     * @return 200 with cache statistics, or 403 if the caller does not have
     *         the {@code fhir-admin} role
     */
    @GetMapping(value = "/cache/stats", produces = MediaType.APPLICATION_JSON_VALUE)
    public ResponseEntity<Map<String, Object>> getCacheStats(HttpServletRequest request) {
        if (!hasAdminRole(request)) {
            // Log rejected attempts here too, matching flushCache — probing of
            // admin endpoints is a signal ops should be able to see in logs.
            log.warn("Cache stats rejected: caller lacks fhir-admin role. " +
                    "clientId={}", getClientId(request));
            return forbidden("Cache stats require fhir-admin role");
        }
        BdTerminologyValidationSupport.CacheStats stats = terminologySupport.getCacheStats();
        return ResponseEntity.ok(Map.of(
                "totalEntries", stats.totalEntries(),
                "expiredEntries", stats.expiredEntries(),
                "liveEntries", stats.totalEntries() - stats.expiredEntries(),
                "cacheTtlSeconds", stats.ttlSeconds(),
                "timestamp", Instant.now().toString()
        ));
    }

    /** Builds the standard 403 JSON body shared by both admin endpoints. */
    private ResponseEntity<Map<String, Object>> forbidden(String message) {
        return ResponseEntity.status(403).body(Map.of(
                "error", "Forbidden",
                "message", message,
                "timestamp", Instant.now().toString()
        ));
    }

    // =========================================================================
    // Private helpers — extract JWT claims set by KeycloakJwtInterceptor
    // =========================================================================
    /**
     * Checks whether the request has the {@code fhir-admin} role.
     *
     * <p>{@link KeycloakJwtInterceptor} validates the JWT and stores extracted
     * claims as request attributes. The admin role claim is stored under the key
     * {@code BD_FHIR_IS_ADMIN}. This avoids re-parsing the JWT in this controller.
     *
     * <p>If the JWT interceptor has not run (e.g., request came through a path
     * not intercepted by HAPI), the attribute will be absent and this returns
     * {@code false} — fail closed.
     */
    private boolean hasAdminRole(HttpServletRequest request) {
        Object adminFlag = request.getAttribute(
                KeycloakJwtInterceptor.REQUEST_ATTR_IS_ADMIN);
        return Boolean.TRUE.equals(adminFlag);
    }

    /** Returns the caller's client id claim, or "unknown" when absent. */
    private String getClientId(HttpServletRequest request) {
        Object clientId = request.getAttribute(
                KeycloakJwtInterceptor.REQUEST_ATTR_CLIENT_ID);
        return clientId != null ? clientId.toString() : "unknown";
    }

    /** Returns the request correlation id claim, or null when absent. */
    private String getRequestId(HttpServletRequest request) {
        Object requestId = request.getAttribute(
                KeycloakJwtInterceptor.REQUEST_ATTR_REQUEST_ID);
        return requestId != null ? requestId.toString() : null;
    }
}

View File

@@ -0,0 +1,483 @@
package bd.gov.dghs.fhir.validator;
import ca.uhn.fhir.rest.api.server.RequestDetails;
import ca.uhn.fhir.rest.server.exceptions.UnprocessableEntityException;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hc.client5.http.classic.methods.HttpPost;
import org.apache.hc.client5.http.config.RequestConfig;
import org.apache.hc.client5.http.impl.classic.CloseableHttpClient;
import org.apache.hc.client5.http.impl.classic.HttpClients;
import org.apache.hc.core5.http.ClassicHttpResponse;
import org.apache.hc.core5.http.ContentType;
import org.apache.hc.core5.http.io.entity.StringEntity;
import org.apache.hc.core5.util.Timeout;
import org.hl7.fhir.r4.model.CodeableConcept;
import org.hl7.fhir.r4.model.Coding;
import org.hl7.fhir.r4.model.Condition;
import org.hl7.fhir.r4.model.Extension;
import org.hl7.fhir.r4.model.OperationOutcome;
import org.hl7.fhir.r4.model.Resource;
import org.hl7.fhir.r4.model.StringType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* Validates ICD-11 postcoordinated cluster expressions in FHIR resources.
*
* <h2>BD Core IG Cluster Expression Pattern</h2>
* <p>BD Core IG prohibits raw postcoordinated strings in {@code Coding.code}.
* Cluster expressions MUST be represented as:
* <pre>
* "code": {
* "coding": [{
* "system": "http://id.who.int/icd/release/11/mms",
* "code": "XY9Z", &lt;-- stem code (validated by OCL)
* "extension": [{
* "url": "icd11-cluster-expression",
* "valueString": "XY9Z&amp;has_manifestation=AB12" &lt;-- full expression
* }]
* }]
* }
* </pre>
*
* <h2>Validation logic</h2>
* <p>For each {@code Coding} element with system = ICD-11 MMS:
* <ol>
* <li>Check if {@code icd11-cluster-expression} extension is present.</li>
* <li>If present: extract stem code and cluster expression. Call cluster
* validator to validate the full expression. Both OCL stem validation
* (handled by {@link BdTerminologyValidationSupport}) AND cluster
* expression validation must pass. If cluster validation fails: 422.</li>
* <li>If absent: check if {@code Coding.code} contains postcoordination
* syntax characters ({@code &}, {@code /}, {@code %}). If yes: reject
* with 422 — raw postcoordinated strings without the extension are
* explicitly prohibited by BD Core IG. If no: plain stem code, no
* cluster validation required.</li>
* </ol>
*
* <h2>Integration with OCL validation</h2>
* <p>This validator is invoked AFTER OCL has validated the stem code.
* If OCL rejects the stem code, the resource is already rejected before
* this class is called. This validator handles the additional cluster
* expression validation layer.
*
* <p>This class is invoked by {@link bd.gov.dghs.fhir.interceptor.AuditEventInterceptor}
* as a pre-storage hook, after the HAPI RequestValidatingInterceptor has run
* profile + OCL validation.
*/
@Component
public class ClusterExpressionValidator {
private static final Logger log = LoggerFactory.getLogger(ClusterExpressionValidator.class);
/** ICD-11 MMS system URI */
private static final String ICD11_SYSTEM = "http://id.who.int/icd/release/11/mms";
/** Extension URL that marks a Coding as containing a cluster expression */
private static final String CLUSTER_EXT_URL = "icd11-cluster-expression";
/**
* Characters that indicate a raw postcoordinated expression in Coding.code.
* BD Core IG prohibits these in Coding.code without the cluster extension.
*/
private static final char[] POSTCOORD_CHARS = {'&', '/', '%'};
@Value("${bd.fhir.cluster-validator.url}")
private String clusterValidatorUrl;
@Value("${bd.fhir.cluster-validator.timeout-seconds}")
private int timeoutSeconds;
private CloseableHttpClient httpClient;
private final ObjectMapper objectMapper = new ObjectMapper();
@PostConstruct
public void initialise() {
RequestConfig config = RequestConfig.custom()
.setConnectionRequestTimeout(Timeout.ofSeconds(timeoutSeconds))
.setResponseTimeout(Timeout.ofSeconds(timeoutSeconds))
.build();
this.httpClient = HttpClients.custom()
.setDefaultRequestConfig(config)
.setMaxConnTotal(10)
.setMaxConnPerRoute(10)
.evictExpiredConnections()
.evictIdleConnections(Timeout.ofMinutes(5))
.build();
log.info("ClusterExpressionValidator initialised: url={}, timeoutSeconds={}",
clusterValidatorUrl, timeoutSeconds);
}
@PreDestroy
public void shutdown() {
try {
if (httpClient != null) httpClient.close();
} catch (IOException e) {
log.warn("Error closing cluster validator HttpClient: {}", e.getMessage());
}
}
// =========================================================================
// Public API — called by AuditEventInterceptor
// =========================================================================
/**
* Validates all ICD-11 coded elements in a resource for cluster expressions.
*
* <p>This method is a no-op for resource types that do not contain ICD-11
* coded elements (e.g., Patient, Practitioner). It only performs validation
* for resource types that carry {@code CodeableConcept} elements with ICD-11
* codings.
*
* @param resource the resource being submitted
* @param requestDetails HAPI request context (for element path reporting)
* @throws UnprocessableEntityException with FHIR OperationOutcome if
* cluster expression validation fails
*/
public void validateResource(Resource resource, RequestDetails requestDetails) {
List<ClusterValidationTarget> targets = extractTargets(resource);
if (targets.isEmpty()) {
return; // No ICD-11 coded elements — nothing to validate
}
List<OperationOutcome.OperationOutcomeIssueComponent> issues = new ArrayList<>();
for (ClusterValidationTarget target : targets) {
validateTarget(target, issues);
}
if (!issues.isEmpty()) {
throw buildUnprocessableEntityException(issues);
}
}
// =========================================================================
// Target extraction — identify all ICD-11 Coding elements in resource
// =========================================================================
/**
* Extracts all ICD-11 Coding elements from a resource that need cluster validation.
*
* <p>Currently handles: Condition.code
* Extend this method as BD Core IG adds cluster expression support to
* other resource types (e.g., Observation.code, Procedure.code).
*/
private List<ClusterValidationTarget> extractTargets(Resource resource) {
List<ClusterValidationTarget> targets = new ArrayList<>();
if (resource instanceof Condition condition) {
extractFromCodeableConcept(
condition.getCode(), "Condition.code", targets);
}
// Future: add Observation.code, Procedure.code, etc. here as
// BD Core IG expands cluster expression support to other profiles.
return targets;
}
private void extractFromCodeableConcept(
CodeableConcept codeableConcept,
String fhirPath,
List<ClusterValidationTarget> targets) {
if (codeableConcept == null || codeableConcept.isEmpty()) {
return;
}
for (int i = 0; i < codeableConcept.getCoding().size(); i++) {
Coding coding = codeableConcept.getCoding().get(i);
String codingPath = fhirPath + ".coding[" + i + "]";
if (!ICD11_SYSTEM.equals(coding.getSystem())) {
continue; // Not ICD-11 — skip
}
String code = coding.getCode();
if (code == null || code.isBlank()) {
continue; // No code — let profile validation handle this
}
// Check for cluster extension
Extension clusterExt = findClusterExtension(coding);
if (clusterExt != null) {
// Cluster extension present — extract expression and validate
String clusterExpression = extractClusterExpression(clusterExt);
if (clusterExpression == null || clusterExpression.isBlank()) {
// Extension present but empty — reject
targets.add(new ClusterValidationTarget(
code, null, codingPath,
ClusterValidationTarget.Type.EMPTY_EXTENSION));
} else {
targets.add(new ClusterValidationTarget(
code, clusterExpression, codingPath,
ClusterValidationTarget.Type.CLUSTER_EXPRESSION));
}
} else {
// No cluster extension — check if code looks like raw postcoordination
if (containsPostcoordinationChars(code)) {
// Raw postcoordinated string without extension — prohibited
targets.add(new ClusterValidationTarget(
code, null, codingPath,
ClusterValidationTarget.Type.RAW_POSTCOORD));
}
// Plain stem code — no cluster validation needed
}
}
}
// =========================================================================
// Validation execution
// =========================================================================
private void validateTarget(
ClusterValidationTarget target,
List<OperationOutcome.OperationOutcomeIssueComponent> issues) {
switch (target.type) {
case RAW_POSTCOORD -> {
// Reject immediately — no external call needed
log.info("Rejected raw postcoordinated ICD-11 code without " +
"icd11-cluster-expression extension: path={} code={}",
target.fhirPath, target.stemCode);
issues.add(buildIssue(
OperationOutcome.IssueSeverity.ERROR,
OperationOutcome.IssueType.BUSINESSRULE,
"ICD-11 postcoordinated expression in " + target.fhirPath +
" must use the icd11-cluster-expression extension. " +
"Raw postcoordinated strings in Coding.code are prohibited " +
"by BD Core IG. Found code: '" + target.stemCode + "'. " +
"The stem code must be in Coding.code and the full cluster " +
"expression must be in the icd11-cluster-expression extension.",
target.fhirPath + ".code"));
}
case EMPTY_EXTENSION -> {
issues.add(buildIssue(
OperationOutcome.IssueSeverity.ERROR,
OperationOutcome.IssueType.REQUIRED,
"icd11-cluster-expression extension at " + target.fhirPath +
" is present but contains no value. " +
"The extension valueString must contain the full " +
"ICD-11 cluster expression.",
target.fhirPath + ".extension[url=icd11-cluster-expression].valueString"));
}
case CLUSTER_EXPRESSION -> {
// Call cluster validator middleware
validateClusterExpression(target, issues);
}
}
}
/**
* Calls the cluster validator middleware to validate a postcoordinated expression.
*
* <p>The cluster validator validates the syntactic and semantic correctness
* of the full postcoordinated cluster expression. OCL validates the stem code
* (handled upstream). Both must pass.
*
* <p>Request body format (application/json):
* <pre>
* {
* "stemCode": "XY9Z",
* "clusterExpression": "XY9Z&has_manifestation=AB12",
* "system": "http://id.who.int/icd/release/11/mms"
* }
* </pre>
*
* <p>Response format on success (200):
* <pre>
* { "valid": true }
* </pre>
*
* <p>Response format on failure (200 with valid=false, or 422):
* <pre>
* { "valid": false, "reason": "Invalid axis: has_manifestation requires..." }
* </pre>
*/
private void validateClusterExpression(
ClusterValidationTarget target,
List<OperationOutcome.OperationOutcomeIssueComponent> issues) {
try {
String requestBody = objectMapper.writeValueAsString(Map.of(
"stemCode", target.stemCode,
"clusterExpression", target.clusterExpression,
"system", ICD11_SYSTEM
));
HttpPost post = new HttpPost(clusterValidatorUrl);
post.setHeader("Content-Type", "application/json");
post.setHeader("Accept", "application/json");
post.setEntity(new StringEntity(requestBody, ContentType.APPLICATION_JSON));
ClusterValidationResponse response = httpClient.execute(
post, (ClassicHttpResponse httpResponse) -> {
int status = httpResponse.getCode();
if (status == 200 || status == 422) {
byte[] body = httpResponse.getEntity().getContent().readAllBytes();
return parseClusterResponse(body, status);
} else if (status >= 500) {
// Cluster validator server error — fail-open
// Document the same fail-open policy as OCL
log.warn("Cluster validator server error: status={} " +
"stemCode={} expression={} — accepting (fail-open)",
status, target.stemCode, target.clusterExpression);
return ClusterValidationResponse.failOpen(
"Cluster validator unavailable (HTTP " + status + ")");
} else {
log.warn("Unexpected cluster validator status: {}", status);
return ClusterValidationResponse.failOpen(
"Unexpected cluster validator response: " + status);
}
});
if (!response.valid && !response.failOpen) {
log.info("Cluster expression rejected: path={} stemCode={} " +
"expression={} reason={}",
target.fhirPath, target.stemCode,
target.clusterExpression, response.reason);
issues.add(buildIssue(
OperationOutcome.IssueSeverity.ERROR,
OperationOutcome.IssueType.CODEINVALID,
"ICD-11 cluster expression at " + target.fhirPath +
" failed validation. " +
"Stem code: '" + target.stemCode + "'. " +
"Expression: '" + target.clusterExpression + "'. " +
"Reason: " + (response.reason != null ? response.reason :
"Expression is not a valid ICD-11 postcoordinated cluster."),
target.fhirPath + ".extension[url=icd11-cluster-expression].valueString"));
} else if (response.failOpen) {
log.warn("Cluster validation skipped (fail-open): path={} stemCode={}",
target.fhirPath, target.stemCode);
// Resource accepted despite cluster validator unavailability.
// The AuditEventInterceptor will log this as a warning.
} else {
log.debug("Cluster expression valid: path={} stemCode={}",
target.fhirPath, target.stemCode);
}
} catch (Exception e) {
// Connection failure — fail-open (same policy as OCL)
log.error("Cluster validator connection failed: stemCode={} expression={} error={}",
target.stemCode, target.clusterExpression, e.getMessage());
// Do not add to issues — fail-open
}
}
// =========================================================================
// Helpers
// =========================================================================
private Extension findClusterExtension(Coding coding) {
return coding.getExtension().stream()
.filter(ext -> CLUSTER_EXT_URL.equals(ext.getUrl()))
.findFirst()
.orElse(null);
}
private String extractClusterExpression(Extension extension) {
if (extension.getValue() instanceof StringType st) {
return st.getValue();
}
// Handle other value types gracefully
return extension.getValue() != null ? extension.getValue().toString() : null;
}
private boolean containsPostcoordinationChars(String code) {
for (char c : POSTCOORD_CHARS) {
if (code.indexOf(c) >= 0) return true;
}
return false;
}
private ClusterValidationResponse parseClusterResponse(byte[] body, int statusCode) {
try {
var node = objectMapper.readTree(body);
boolean valid = node.path("valid").asBoolean(false);
String reason = node.path("reason").asText(null);
if (statusCode == 422) {
// Explicit rejection from cluster validator
return new ClusterValidationResponse(false, false,
reason != null ? reason : "Cluster expression rejected by validator");
}
return new ClusterValidationResponse(valid, false, reason);
} catch (Exception e) {
log.warn("Could not parse cluster validator response: {}", e.getMessage());
return ClusterValidationResponse.failOpen("Could not parse validator response");
}
}
private OperationOutcome.OperationOutcomeIssueComponent buildIssue(
OperationOutcome.IssueSeverity severity,
OperationOutcome.IssueType type,
String diagnostics,
String expression) {
var issue = new OperationOutcome.OperationOutcomeIssueComponent();
issue.setSeverity(severity);
issue.setCode(type);
issue.setDiagnostics(diagnostics);
if (expression != null) {
issue.addExpression(expression);
}
return issue;
}
private UnprocessableEntityException buildUnprocessableEntityException(
List<OperationOutcome.OperationOutcomeIssueComponent> issues) {
OperationOutcome oo = new OperationOutcome();
issues.forEach(oo::addIssue);
return new UnprocessableEntityException(
ca.uhn.fhir.context.FhirContext.forR4(),
oo);
}
// =========================================================================
// Inner classes
// =========================================================================
/** Represents a single ICD-11 Coding element requiring cluster validation */
private record ClusterValidationTarget(
String stemCode,
String clusterExpression,
String fhirPath,
Type type) {
enum Type {
CLUSTER_EXPRESSION, // has extension with valid value — call cluster validator
RAW_POSTCOORD, // has postcoord chars in code, no extension — reject immediately
EMPTY_EXTENSION // has extension but empty value — reject immediately
}
}
/** Response from the cluster validator middleware */
private record ClusterValidationResponse(
boolean valid,
boolean failOpen,
String reason) {
static ClusterValidationResponse failOpen(String reason) {
return new ClusterValidationResponse(true, true, reason);
}
}
}

View File

@@ -0,0 +1,534 @@
# =============================================================================
# BD FHIR National — application.yaml
# Spring Boot 3.2.x + HAPI FHIR 7.2.0
#
# ALL secrets and environment-specific values come from environment variables.
# No secret value appears in this file — only ${VARIABLE_NAME} references.
# This file is safe to commit to version control.
#
# Profile: prod (set via SPRING_PROFILES_ACTIVE=prod in docker-compose)
# Overrides can be placed in application-prod.yaml for prod-only values,
# but this single file covers all required configuration.
# =============================================================================
# -----------------------------------------------------------------------------
# SERVER
# -----------------------------------------------------------------------------
server:
port: 8080
# Graceful shutdown: allow in-flight requests to complete before container
# stops. Docker stop sends SIGTERM → Spring waits up to grace-period for
# active requests to finish → then shuts down JVM.
# tini in Dockerfile ensures SIGTERM is forwarded correctly to the JVM.
shutdown: graceful
# Tomcat connector tuning
tomcat:
# Max threads: number of concurrent HTTP requests HAPI can process.
# Each FHIR validation call is CPU-bound (IG profile check) + IO-bound
# (OCL $validate-code call). With OCL timeout=10s and 5 DB connections
# per replica, 20 threads is the correct ceiling at pilot scale.
# Increasing beyond DB pool size causes thread queueing, not parallelism.
threads:
max: 20
min-spare: 5
# Connection timeout: reject connections held open without sending data.
connection-timeout: 20000
# Accept count: queue depth when all threads are busy.
# 50 is generous for pilot scale. Requests beyond this get TCP RST.
accept-count: 50
# Servlet context path — HAPI registers its own /fhir path internally.
# Do not set context-path here; HAPI's RestfulServer manages the /fhir prefix.
servlet:
context-path: /
lifecycle:
# Must match server.shutdown: graceful
timeout-per-shutdown-phase: 30s
# -----------------------------------------------------------------------------
# SPRING CORE
# -----------------------------------------------------------------------------
spring:
application:
name: bd-fhir-hapi
profiles:
active: ${SPRING_PROFILES_ACTIVE:prod}
# ---------------------------------------------------------------------------
# PRIMARY DATASOURCE — FHIR store (postgres-fhir via pgBouncer)
# ---------------------------------------------------------------------------
datasource:
url: ${SPRING_DATASOURCE_URL}
username: ${SPRING_DATASOURCE_USERNAME}
password: ${SPRING_DATASOURCE_PASSWORD}
driver-class-name: org.postgresql.Driver
hikari:
pool-name: ${SPRING_DATASOURCE_HIKARI_POOL_NAME:fhir-pool}
maximum-pool-size: ${SPRING_DATASOURCE_HIKARI_MAXIMUM_POOL_SIZE:5}
minimum-idle: ${SPRING_DATASOURCE_HIKARI_MINIMUM_IDLE:2}
connection-timeout: ${SPRING_DATASOURCE_HIKARI_CONNECTION_TIMEOUT:30000}
idle-timeout: ${SPRING_DATASOURCE_HIKARI_IDLE_TIMEOUT:600000}
max-lifetime: ${SPRING_DATASOURCE_HIKARI_MAX_LIFETIME:1800000}
# pgBouncer session mode compatibility:
# autoCommit=true is safe with session mode — pgBouncer does not
# reset autoCommit between sessions (it maintains session state).
auto-commit: true
# Connection test query — verified against PostgreSQL 15.
# isValid() is preferred over connectionTestQuery for JDBC4 drivers
# (PostgreSQL JDBC is JDBC4). Leaving connectionTestQuery unset
# causes HikariCP to use isValid() which is more efficient.
# Leak detection: log a warning if a connection is held for >60s.
# A connection held beyond this is almost certainly a hung OCL call
# or a transaction that was not committed. Set to 0 in dev to disable.
leak-detection-threshold: 60000
data-source-properties:
# PostgreSQL-specific: use server-side prepared statements.
# With pgBouncer session mode this is safe. With transaction mode
# it would break — another reason session mode is mandatory here.
prepareThreshold: 5
# Batch size for inserts — improves search index write throughput
reWriteBatchedInserts: true
# Application name visible in pg_stat_activity
ApplicationName: bd-fhir-hapi
# Socket timeout: fail fast if PostgreSQL becomes unreachable.
# Without this, Hikari waits indefinitely on a dead connection.
socketTimeout: 30
# ---------------------------------------------------------------------------
# JPA / HIBERNATE
# ---------------------------------------------------------------------------
jpa:
# CRITICAL: validate — Hibernate checks schema matches entities but does
# NOT create or alter tables. Flyway owns all DDL.
# Never use create, create-drop, or update in production.
hibernate:
ddl-auto: validate
properties:
hibernate:
# PostgreSQL dialect for Hibernate 6 (bundled with Spring Boot 3.2)
dialect: org.hibernate.dialect.PostgreSQLDialect
# Search index batch writes — dramatically reduces INSERT round-trips
# for search parameter indexing. HAPI inserts many HFJ_SPIDX_* rows
# per resource. Batch size 50 reduces round-trips by 50x.
jdbc:
batch_size: 50
order_inserts: true
order_updates: true
# Fetch size for result set streaming — prevents OOM on large searches
fetch_size: 100
# Connection handling: DELAYED_ACQUISITION_AND_RELEASE_AFTER_TRANSACTION
# Releases the JDBC connection back to HikariCP after each transaction
# rather than holding it for the lifetime of the Session.
# Critical for efficient connection pool use with HAPI's long-lived
# EntityManager sessions.
connection:
handling_mode: DELAYED_ACQUISITION_AND_RELEASE_AFTER_TRANSACTION
# Second-level cache: disabled. HAPI manages its own caches.
# Enabling Hibernate L2 cache with HAPI causes cache coherence issues
# across replicas. Never enable for HAPI.
cache:
use_second_level_cache: false
use_query_cache: false
# Format SQL in logs for readability — disable in prod for performance
format_sql: false
# Log SQL — enable only for debugging, never in production
# (would log patient data to container stdout)
show_sql: false
# Statistics: disabled in production. Enable temporarily for
# performance tuning with: LOGGING_LEVEL_ORG_HIBERNATE_STAT=DEBUG
generate_statistics: false
# Open EntityManager in View: false — prevents lazy loading outside
# transactions. HAPI handles its own transaction boundaries.
open-in-view: false
# ---------------------------------------------------------------------------
# FLYWAY — FHIR schema (V1__hapi_schema.sql)
# Connects directly to postgres-fhir bypassing pgBouncer (see Step 4 notes).
# ---------------------------------------------------------------------------
flyway:
enabled: true
url: ${SPRING_FLYWAY_URL}
user: ${SPRING_FLYWAY_USER}
password: ${SPRING_FLYWAY_PASSWORD}
# V1 migration only — audit schema has its own Flyway instance (DataSourceConfig.java)
locations: classpath:db/migration/fhir
# Baseline on migrate: if the database already has tables (e.g., from a
# previous manual setup), Flyway will baseline rather than fail.
# Set to false after first successful migration.
baseline-on-migrate: false
# Validate on migrate: Flyway checks checksum of already-applied migrations.
# If V1 was modified after being applied, startup fails. This is correct
# behaviour — it catches accidental migration file edits.
validate-on-migrate: true
# Out-of-order: false — migrations must run in version order.
out-of-order: false
# Table: Flyway's own metadata table name
table: flyway_schema_history
# Mixed: allow mixing versioned and repeatable migrations
mixed: false
# ---------------------------------------------------------------------------
# JACKSON — JSON serialisation
# ---------------------------------------------------------------------------
jackson:
# HAPI serialises FHIR resources using its own FHIR serialiser, not Jackson.
# Jackson is used for: actuator endpoints, audit log payloads,
# OCL/cluster validator response parsing, admin REST responses.
serialization:
write-dates-as-timestamps: false
indent-output: false
deserialization:
fail-on-unknown-properties: false
default-property-inclusion: non_null
time-zone: UTC
# ---------------------------------------------------------------------------
# TASK EXECUTION — Spring's async executor
# Used by @Async methods: AuditEventEmitter writes audit records
# asynchronously so they do not block the FHIR request thread.
# ---------------------------------------------------------------------------
task:
execution:
pool:
core-size: 4
max-size: 10
queue-capacity: 500
# Thread name prefix — visible in thread dumps and profilers
keep-alive: 60s
thread-name-prefix: audit-async-
scheduling:
pool:
size: 2
thread-name-prefix: scheduled-
# -----------------------------------------------------------------------------
# HAPI FHIR CONFIGURATION
# These properties are read by HAPI's Spring Boot auto-configuration.
# Reference: https://hapifhir.io/hapi-fhir/docs/server_jpa/configuration.html
# -----------------------------------------------------------------------------
hapi:
  fhir:
    # FHIR version — must be R4 for BD Core IG 0.2.1
    fhir-version: R4
    # Server base URL — must match nginx proxy_pass destination and
    # the URL published in BD Core IG CapabilityStatement.
    server-address: ${HAPI_FHIR_SERVER_ADDRESS:https://fhir.dghs.gov.bd/fhir}
    # Server name — appears in CapabilityStatement.software.name
    server-name: "BD FHIR National Repository"
    server-version: "1.0.0"
    # -------------------------------------------------------------------------
    # VALIDATION — enforce on ALL requests, no exceptions
    # -------------------------------------------------------------------------
    validation:
      # Enable request validation — every incoming resource is validated
      # before storage. This is the primary enforcement point for BD Core IG.
      enabled: true
      # Validate responses — disable for performance. Outgoing resources
      # were already validated on ingestion. Re-validating on read adds
      # latency with no security benefit.
      response-enabled: false
      # NOTE(review): semantics of request-only are not obvious from here —
      # confirm against the HAPI starter property reference before relying on it
      request-only: false
    # -------------------------------------------------------------------------
    # STORAGE
    # -------------------------------------------------------------------------
    # Multiple (batch) delete is DISABLED. NOTE(review): an earlier note said
    # batch deletes are "needed for admin operations", which contradicts this
    # value — confirm whether admin tooling requires it to be true.
    allow-multiple-delete: false
    # Allow external references: true — BD Core IG uses canonical URLs
    allow-external-references: true
    # Reuse cached search results for repeated identical queries (60 s window)
    reuse-cached-search-results-millis: 60000
    # Default page size for search results
    default-page-size: 20
    max-page-size: 200
    # Retain cached searches for 1 hour
    expire-search-results-after-millis: 3600000
    # -------------------------------------------------------------------------
    # RESOURCE TYPES
    # Only R4 resource types in scope for BD national deployment.
    # Unknown resource types are accepted with unvalidated-profile meta tag
    # (handled in FhirServerConfig.java).
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    # NARRATIVE GENERATION
    # Disable — BD vendors provide their own narratives or none at all.
    # Generating narratives server-side adds CPU overhead with no clinical value.
    # -------------------------------------------------------------------------
    narrative-enabled: false
    # -------------------------------------------------------------------------
    # CORS — disabled at HAPI level, handled at nginx level
    # -------------------------------------------------------------------------
    cors:
      enabled: false
    # -------------------------------------------------------------------------
    # SUBSCRIPTIONS — disabled for pilot phase
    # Enable in Phase 2 when real-time notification requirements are defined.
    # -------------------------------------------------------------------------
    subscription:
      resthook-enabled: false
      websocket-enabled: false
      email-enabled: false
    # -------------------------------------------------------------------------
    # BULK EXPORT — enabled for DGHS analytics
    # -------------------------------------------------------------------------
    bulk-export-enabled: true
    # -------------------------------------------------------------------------
    # PARTITIONING — disabled (single-tenant national deployment)
    # -------------------------------------------------------------------------
    partitioning:
      partitioning-enabled: false
    # -------------------------------------------------------------------------
    # ADVANCED LUCENE INDEXING — disabled
    # Uses Hibernate Search / Lucene for full-text search.
    # Not required for BD Core IG search parameters (all token/date/reference).
    # Enabling adds significant memory overhead and startup time.
    # -------------------------------------------------------------------------
    advanced-lucene-indexing: false
    # -------------------------------------------------------------------------
    # TERMINOLOGY
    # Primary terminology validation is handled by BdTerminologyValidationSupport
    # (custom class, Step 7). These HAPI settings control the built-in
    # terminology infrastructure that runs alongside our custom support.
    # -------------------------------------------------------------------------
    # Size (entry count) of HAPI's internal terminology lookup cache.
    # Our custom cache (24h TTL) is separate from HAPI's internal cache;
    # this bounds only HAPI's own cache so ours is the effective one.
    terminology-cache-size: 1000
# -----------------------------------------------------------------------------
# AUDIT DATASOURCE
# Custom datasource — not managed by Spring Boot auto-configuration.
# Configured programmatically in DataSourceConfig.java.
# Environment variables read here for reference — actual wiring in Java.
# -----------------------------------------------------------------------------
audit:
  datasource:
    url: ${AUDIT_DATASOURCE_URL}
    username: ${AUDIT_DATASOURCE_USERNAME}
    password: ${AUDIT_DATASOURCE_PASSWORD}
    hikari:
      pool-name: ${AUDIT_DATASOURCE_HIKARI_POOL_NAME:audit-pool}
      # Tiny pool — audit writes are asynchronous and low-volume
      maximum-pool-size: ${AUDIT_DATASOURCE_HIKARI_MAXIMUM_POOL_SIZE:2}
      minimum-idle: ${AUDIT_DATASOURCE_HIKARI_MINIMUM_IDLE:1}
      # SHORT connection timeout (5 s vs HikariCP's 30 s default) — fail fast
      # so audit failures do not block FHIR request processing
      connection-timeout: 5000
      # Reclaim idle connections after 5 minutes
      idle-timeout: 300000
      # Recycle connections after 15 minutes
      max-lifetime: 900000
      auto-commit: true
      data-source-properties:
        # Shown in pg_stat_activity — identifies this pool on the DB side
        ApplicationName: bd-fhir-hapi-audit
        # PostgreSQL JDBC socketTimeout is in SECONDS (10 s) — TODO confirm
        # this unit was intended and matches the fail-fast goal above
        socketTimeout: 10
  flyway:
    url: ${AUDIT_FLYWAY_URL}
    user: ${AUDIT_FLYWAY_USER}
    password: ${AUDIT_FLYWAY_PASSWORD}
    # Audit migrations kept separate from the main FHIR schema migrations
    locations: classpath:db/migration/audit
    table: flyway_audit_schema_history
# -----------------------------------------------------------------------------
# OCL TERMINOLOGY SERVICE
# Configuration for BdTerminologyValidationSupport (Step 7).
# All values injected as environment variables.
# -----------------------------------------------------------------------------
bd:
  fhir:
    ocl:
      base-url: ${HAPI_OCL_BASE_URL:https://tr.ocl.dghs.gov.bd/api/fhir}
      timeout-seconds: ${HAPI_OCL_TIMEOUT_SECONDS:10}
      retry-attempts: ${HAPI_OCL_RETRY_ATTEMPTS:2}
      # ICD-11 MMS system URI — must match what BD Core IG profiles declare
      icd11-system: http://id.who.int/icd/release/11/mms
      # BD Condition ValueSet canonical URL
      condition-valueset-url: https://fhir.dghs.gov.bd/core/ValueSet/bd-condition-icd11-diagnosis-valueset
    cluster-validator:
      url: ${HAPI_CLUSTER_VALIDATOR_URL:https://icd11.dghs.gov.bd/cluster/validate}
      timeout-seconds: ${HAPI_CLUSTER_VALIDATOR_TIMEOUT_SECONDS:10}
      # Extension URL that marks a Coding as containing a cluster expression.
      # NOTE(review): this is a bare token, not an absolute URL — confirm the
      # consuming code does not expect a full canonical extension URL here.
      extension-url: icd11-cluster-expression
      # ICD-11 system URI — same as above, repeated for cluster validator context
      icd11-system: http://id.who.int/icd/release/11/mms
    keycloak:
      issuer: ${KEYCLOAK_ISSUER:https://auth.dghs.gov.bd/realms/hris}
      jwks-url: ${KEYCLOAK_JWKS_URL:https://auth.dghs.gov.bd/realms/hris/protocol/openid-connect/certs}
      # Role a token must carry to submit FHIR resources
      required-role: ${KEYCLOAK_REQUIRED_ROLE:mci-api}
      # Role required for admin-only endpoints
      admin-role: ${KEYCLOAK_ADMIN_ROLE:fhir-admin}
      # JWKS key-set cached for 1 hour before re-fetch
      jwks-cache-ttl-seconds: ${KEYCLOAK_JWKS_CACHE_TTL_SECONDS:3600}
    ig:
      # IG package is baked into the image at build time (see Dockerfile)
      package-classpath: ${HAPI_IG_PACKAGE_CLASSPATH:classpath:packages/bd.gov.dghs.core-0.2.1.tgz}
      version: ${HAPI_IG_VERSION:0.2.1}
    terminology:
      # 24-hour cache TTL for validated ICD-11 codes (in seconds)
      cache-ttl-seconds: ${HAPI_TERMINOLOGY_CACHE_TTL_SECONDS:86400}
    # Unknown resource type handling
    # Resources with types not profiled in BD Core IG are stored with
    # meta.tag = unvalidated-profile (see FhirServerConfig.java)
    unvalidated-profile-tag-system: https://fhir.dghs.gov.bd/tags
    unvalidated-profile-tag-code: unvalidated-profile
# -----------------------------------------------------------------------------
# ACTUATOR — health, info, metrics
# -----------------------------------------------------------------------------
management:
  endpoints:
    web:
      # Only expose endpoints needed by load balancer and ops team.
      # Never expose env, beans, or mappings in production — they leak
      # configuration including partially-masked secrets.
      exposure:
        include:
          - health
          - info
          - metrics
          - prometheus
          - loggers
      base-path: /actuator
  endpoint:
    health:
      # Show details only to authenticated requests.
      # Load balancer hits /actuator/health/liveness — no auth needed for liveness.
      show-details: when-authorized
      show-components: when-authorized
      # Separate liveness and readiness probes.
      #   liveness:  is the JVM alive? (load balancer uses this)
      #   readiness: is the application ready to serve traffic?
      # readiness waits for: Flyway migrations complete, IG loaded,
      # OCL connectivity verified.
      probes:
        enabled: true
      group:
        liveness:
          include:
            - livenessState
            - ping
        readiness:
          include:
            - readinessState
            - db       # FHIR datasource
            - auditDb  # Audit datasource (custom indicator)
            - ocl      # OCL reachability (custom indicator)
    info:
      enabled: true
    metrics:
      enabled: true
    prometheus:
      enabled: true
    loggers:
      enabled: true
  # Health indicator configuration
  health:
    # Disable default DataSourceHealthIndicator — it would check ALL datasources
    # including the INSERT-only audit datasource with SELECT 1, which would fail
    # for the audit_writer role. We replace it with custom indicators.
    db:
      enabled: true  # kept for FHIR datasource (hapi_app user can SELECT 1)
    # Disable default disk space check — not useful in container environment
    diskspace:
      enabled: false
    defaults:
      enabled: true
  # Metrics
  metrics:
    tags:
      # Common tags on all metrics — useful for filtering in Prometheus/Grafana
      application: bd-fhir-hapi
      environment: ${SPRING_PROFILES_ACTIVE:prod}
  # Prometheus export. FIXED: on Spring Boot 3.x (used by HAPI 7.2.0) the
  # toggle moved from management.metrics.export.prometheus.enabled to
  # management.prometheus.metrics.export.enabled — the old key is ignored.
  prometheus:
    metrics:
      export:
        enabled: true
  # Info endpoint contributors
  info:
    env:
      enabled: true
    build:
      enabled: true
    git:
      enabled: true
      mode: simple
# Application metadata for /actuator/info
info:
  application:
    name: BD FHIR National Repository
    description: National FHIR R4 repository and validation engine
    # Surfaced so ops can confirm the deployed IG version without shell access
    ig-version: ${HAPI_IG_VERSION:0.2.1}
    fhir-version: R4
    hapi-version: 7.2.0
# -----------------------------------------------------------------------------
# LOGGING
# Structured JSON output for ELK ingestion.
# Log levels controlled via environment variables in docker-compose.
# IMPORTANT: Never log at DEBUG in production — FHIR resources contain
# patient data. DEBUG logs in HAPI output full resource JSON.
# -----------------------------------------------------------------------------
logging:
  level:
    root: ${LOGGING_LEVEL_ROOT:WARN}
    bd.gov.dghs: ${LOGGING_LEVEL_BD_GOV_DGHS:INFO}
    bd.gov.dghs.fhir.interceptor: ${LOGGING_LEVEL_BD_GOV_DGHS_FHIR_INTERCEPTOR:INFO}
    bd.gov.dghs.fhir.terminology: ${LOGGING_LEVEL_BD_GOV_DGHS_FHIR_TERMINOLOGY:INFO}
    bd.gov.dghs.fhir.validator: ${LOGGING_LEVEL_BD_GOV_DGHS_FHIR_VALIDATOR:INFO}
    ca.uhn.hapi.fhir: ${LOGGING_LEVEL_CA_UHN_HAPI:WARN}
    org.springframework: ${LOGGING_LEVEL_ORG_SPRINGFRAMEWORK:WARN}
    org.springframework.web: WARN
    org.hibernate: WARN
    org.hibernate.SQL: WARN
    # Flyway: INFO to see migration progress on startup
    org.flywaydb: INFO
    # HikariCP: WARN unless debugging pool exhaustion
    com.zaxxer.hikari: WARN
    # Nimbus JWT: WARN unless debugging token validation
    com.nimbusds: WARN
  pattern:
    # Structured JSON logging via logstash-logback-encoder (configured in
    # logback-spring.xml). This pattern is the fallback for non-JSON output.
    console: "%d{yyyy-MM-dd'T'HH:mm:ss.SSSZ} [%thread] %-5level %logger{36} - %msg%n"
  # Log file — written to Docker volume for Filebeat pickup
  file:
    name: /app/logs/bd-fhir-hapi.log
  # Rolling policy. FIXED: on Spring Boot 3.x (used by HAPI 7.2.0) the
  # logging.file.max-size / max-history / total-size-cap keys were removed;
  # rolling is configured under logging.logback.rollingpolicy.* instead.
  logback:
    rollingpolicy:
      max-file-size: 100MB
      max-history: 7
      total-size-cap: 1GB

View File

@@ -0,0 +1,632 @@
-- =============================================================================
-- V2__audit_schema.sql
-- Audit Schema — PostgreSQL 15
--
-- Creates:
-- audit.audit_events — FHIR AuditEvent records (partitioned by month)
-- audit.fhir_rejected_submissions — Rejected resource payloads (partitioned by month)
-- audit.health_check — Used by AuditDataSourceHealthIndicator
-- audit.schema_version — Schema version tracking
--
-- Partitioning strategy:
-- Both main tables use PARTITION BY RANGE (event_time) with monthly partitions.
-- Partitions are pre-created for 2025-01 through 2027-12 (36 months per table).
-- A maintenance job (cron) must create next-month partitions before month rollover.
-- See ops/scaling-roadmap.md for partition maintenance instructions.
--
-- Security:
-- Role audit_writer has INSERT only on audit schema.
-- Role audit_reader has SELECT only (for DGHS analytics queries).
-- HAPI JVM connects as audit_writer via datasource.audit.
-- No UPDATE, DELETE, TRUNCATE granted to any application role.
-- Only a DBA superuser can modify or delete audit records.
--
-- IMMUTABILITY NOTE:
-- Application roles cannot UPDATE or DELETE rows.
-- PostgreSQL row-level security is NOT used here — immutability is
-- enforced entirely through GRANT/REVOKE at the schema level.
-- For stronger guarantees, consider pg_audit extension or
-- logical replication to an append-only replica.
-- =============================================================================
-- ---------------------------------------------------------------------------
-- SCHEMA
-- ---------------------------------------------------------------------------
CREATE SCHEMA IF NOT EXISTS audit;
-- ---------------------------------------------------------------------------
-- ROLES
-- Created here; passwords set via environment variable at runtime.
-- If roles already exist (re-run scenario), skip creation.
-- NOTE(review): both roles are NOLOGIN group roles — confirm that the
-- provisioning runbook creates LOGIN users that are granted these roles.
-- ---------------------------------------------------------------------------
DO $$
BEGIN
    -- CREATE ROLE has no IF NOT EXISTS, so guard with a pg_roles lookup
    -- to keep the migration idempotent on re-runs.
    IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'audit_writer') THEN
        CREATE ROLE audit_writer NOLOGIN;
    END IF;
    IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'audit_reader') THEN
        CREATE ROLE audit_reader NOLOGIN;
    END IF;
END
$$;
-- Grant schema usage (USAGE allows name resolution only; per-table grants
-- below control actual data access)
GRANT USAGE ON SCHEMA audit TO audit_writer;
GRANT USAGE ON SCHEMA audit TO audit_reader;
-- ---------------------------------------------------------------------------
-- SCHEMA VERSION
-- Simple metadata table, not partitioned, not audited.
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS audit.schema_version (
    version     VARCHAR(20) NOT NULL,
    applied_at  TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    description TEXT,
    CONSTRAINT PK_audit_schema_version PRIMARY KEY (version)
);
-- Record this migration's version; ON CONFLICT keeps re-runs idempotent.
INSERT INTO audit.schema_version (version, description)
VALUES ('2.0.0', 'Initial audit schema with monthly partitioning')
ON CONFLICT DO NOTHING;
-- ---------------------------------------------------------------------------
-- HEALTH CHECK TABLE
-- Used exclusively by AuditDataSourceHealthIndicator.
-- INSERT ... ON CONFLICT DO NOTHING avoids growing this table.
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS audit.health_check (
    check_id   VARCHAR(36) NOT NULL,
    checked_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
    CONSTRAINT PK_audit_health_check PRIMARY KEY (check_id)
);
-- Seed one fixed row so the health-check INSERT always conflicts on the PK
-- and therefore becomes a no-op (table never grows).
INSERT INTO audit.health_check (check_id)
VALUES ('00000000-0000-0000-0000-000000000000')
ON CONFLICT DO NOTHING;
-- INSERT-only grant is sufficient: ON CONFLICT DO NOTHING does not need SELECT.
GRANT INSERT ON audit.health_check TO audit_writer;
-- ---------------------------------------------------------------------------
-- AUDIT EVENTS — partitioned by month
--
-- Columns:
--   event_id        — UUID, generated by application
--   event_time      — UTC timestamp (partition key)
--   event_type      — OPERATION | AUTH_FAILURE | VALIDATION_FAILURE
--   operation       — CREATE | UPDATE | DELETE | READ
--   resource_type   — Patient | Condition | Encounter | etc.
--   resource_id     — FHIR logical ID (may be null for rejected resources)
--   resource_version— FHIR version number (null for rejected)
--   outcome         — ACCEPTED | REJECTED
--   outcome_detail  — Human-readable rejection reason
--   sending_facility— Extracted from Keycloak token claim
--   client_id       — Keycloak client_id (fhir-vendor-{org-id})
--   subject         — Keycloak sub (service account user ID)
--   request_ip      — Client IP address from X-Forwarded-For or RemoteAddr
--   request_id      — Random UUID assigned per HTTP request for correlation
--   validation_messages — JSONB array of OperationOutcome issues
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS audit.audit_events (
    event_id         UUID NOT NULL,
    event_time       TIMESTAMP WITH TIME ZONE NOT NULL,
    event_type       VARCHAR(30) NOT NULL
        CHECK (event_type IN ('OPERATION','AUTH_FAILURE','VALIDATION_FAILURE','TERMINOLOGY_FAILURE','CLUSTER_FAILURE')),
    -- nullable: AUTH_FAILURE events may have no FHIR operation
    operation        VARCHAR(10)
        CHECK (operation IN ('CREATE','UPDATE','DELETE','READ','PATCH')),
    resource_type    VARCHAR(40),
    resource_id      VARCHAR(64),
    resource_version BIGINT,
    outcome          VARCHAR(10) NOT NULL
        CHECK (outcome IN ('ACCEPTED','REJECTED')),
    outcome_detail   TEXT,
    sending_facility VARCHAR(200),
    client_id        VARCHAR(200) NOT NULL,
    subject          VARCHAR(200) NOT NULL,
    request_ip       VARCHAR(45), -- supports IPv6
    request_id       VARCHAR(36),
    validation_messages JSONB,
    -- Partition key must be included in primary key for partitioned tables
    CONSTRAINT PK_audit_events PRIMARY KEY (event_id, event_time)
) PARTITION BY RANGE (event_time);
-- Indexes on the parent table — PostgreSQL 11+ propagates to partitions.
-- All time-ordered DESC to match "latest first" query patterns.
CREATE INDEX IF NOT EXISTS IDX_AE_CLIENT_ID
    ON audit.audit_events (client_id, event_time DESC);
CREATE INDEX IF NOT EXISTS IDX_AE_FACILITY
    ON audit.audit_events (sending_facility, event_time DESC);
-- Partial index: only rows that reference a stored resource
CREATE INDEX IF NOT EXISTS IDX_AE_RESOURCE
    ON audit.audit_events (resource_type, resource_id, event_time DESC)
    WHERE resource_id IS NOT NULL;
-- Partial index: rejection analytics only ever filter on REJECTED
CREATE INDEX IF NOT EXISTS IDX_AE_OUTCOME
    ON audit.audit_events (outcome, event_time DESC)
    WHERE outcome = 'REJECTED';
-- Correlation lookup by per-request UUID
CREATE INDEX IF NOT EXISTS IDX_AE_REQUEST_ID
    ON audit.audit_events (request_id)
    WHERE request_id IS NOT NULL;
-- ---------------------------------------------------------------------------
-- REJECTED SUBMISSIONS — partitioned by month
--
-- Stores full rejected resource payload for forensic purposes.
-- Vendors can request their rejected submissions from DGHS for debugging.
-- DO NOT expose this table directly via API — only through DGHS admin tools.
--
-- Columns:
--   submission_id   — UUID, generated by application
--   submission_time — UTC timestamp (partition key)
--   event_id        — FK to audit.audit_events (same UUID)
--   resource_type   — FHIR resource type
--   resource_payload— Full JSON payload as submitted (before any modification)
--   rejection_code  — Machine-readable rejection code
--   rejection_reason— Human-readable rejection reason
--   element_path    — FHIRPath expression of the violating element
--   violated_profile— URL of the violated profile constraint
--   invalid_code    — The invalid code value (for terminology rejections)
--   invalid_system  — The code system of the invalid code
--   sending_facility— Extracted from Keycloak token claim
--   client_id       — Keycloak client_id
--
-- NOTE(review): event_id is documented as an FK but no FOREIGN KEY constraint
-- is declared (FKs to partitioned tables require the full partition key) —
-- referential integrity is therefore the application's responsibility.
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS audit.fhir_rejected_submissions (
    submission_id    UUID NOT NULL,
    submission_time  TIMESTAMP WITH TIME ZONE NOT NULL,
    event_id         UUID NOT NULL,
    resource_type    VARCHAR(40),
    resource_payload TEXT NOT NULL, -- full JSON, not JSONB — preserve exact bytes
    rejection_code   VARCHAR(50) NOT NULL
        CHECK (rejection_code IN (
            'PROFILE_VIOLATION',
            'TERMINOLOGY_INVALID_CODE',
            'TERMINOLOGY_INVALID_CLASS',
            'CLUSTER_EXPRESSION_INVALID',
            'CLUSTER_STEM_MISSING_EXTENSION',
            'AUTH_TOKEN_MISSING',
            'AUTH_TOKEN_EXPIRED',
            'AUTH_TOKEN_INVALID_SIGNATURE',
            'AUTH_TOKEN_MISSING_ROLE',
            'AUTH_TOKEN_INVALID_ISSUER'
        )),
    rejection_reason TEXT NOT NULL,
    element_path     VARCHAR(500),
    violated_profile VARCHAR(500),
    invalid_code     VARCHAR(200),
    invalid_system   VARCHAR(200),
    sending_facility VARCHAR(200),
    client_id        VARCHAR(200) NOT NULL,
    -- Partition key in PK
    CONSTRAINT PK_fhir_rejected_submissions PRIMARY KEY (submission_id, submission_time)
) PARTITION BY RANGE (submission_time);
-- Query-pattern indexes (latest-first), propagated to partitions
CREATE INDEX IF NOT EXISTS IDX_RS_CLIENT_ID
    ON audit.fhir_rejected_submissions (client_id, submission_time DESC);
CREATE INDEX IF NOT EXISTS IDX_RS_REJECTION_CODE
    ON audit.fhir_rejected_submissions (rejection_code, submission_time DESC);
CREATE INDEX IF NOT EXISTS IDX_RS_FACILITY
    ON audit.fhir_rejected_submissions (sending_facility, submission_time DESC);
-- Join path back to audit.audit_events
CREATE INDEX IF NOT EXISTS IDX_RS_EVENT_ID
    ON audit.fhir_rejected_submissions (event_id);
-- ---------------------------------------------------------------------------
-- MONTHLY PARTITIONS — pre-created for 2025-01 through 2027-12
-- (36 partitions per table, for both audit_events and
-- fhir_rejected_submissions).
--
-- Naming convention: audit_events_YYYY_MM / fhir_rejected_submissions_YYYY_MM
--
-- IMPROVED: the original migration spelled out all 72 CREATE TABLE
-- statements by hand. This loop generates the identical partitions
-- (same names, same schema, same bounds) and cannot drift between the
-- two tables or skip a month by copy-paste error. To extend the
-- pre-created range, change only the generate_series bounds below.
--
-- MAINTENANCE REQUIREMENT:
--   Create next month's partition BEFORE the 1st of each month.
--   Failing to do so causes INSERT to fail with:
--     "no partition of relation ... found for row"
--   Add to cron on the audit PostgreSQL host:
--     0 0 20 * * psql -U postgres -d auditdb -c "SELECT audit.create_next_month_partitions();"
--   See ops/scaling-roadmap.md for the partition maintenance function.
-- ---------------------------------------------------------------------------
DO $$
DECLARE
    month_start DATE;  -- first day of the month being created
    month_str   TEXT;  -- 'YYYY_MM' suffix for the partition name
BEGIN
    FOR month_start IN
        SELECT d::DATE
        FROM generate_series('2025-01-01'::DATE, '2027-12-01'::DATE,
                             INTERVAL '1 month') AS g(d)
    LOOP
        month_str := TO_CHAR(month_start, 'YYYY_MM');
        -- %I.%I schema-qualifies the new partition so it always lands in
        -- the audit schema regardless of the caller's search_path.
        EXECUTE format(
            'CREATE TABLE IF NOT EXISTS %I.%I PARTITION OF audit.audit_events '
            'FOR VALUES FROM (%L) TO (%L)',
            'audit', 'audit_events_' || month_str,
            month_start::TEXT, (month_start + INTERVAL '1 month')::DATE::TEXT);
        EXECUTE format(
            'CREATE TABLE IF NOT EXISTS %I.%I PARTITION OF audit.fhir_rejected_submissions '
            'FOR VALUES FROM (%L) TO (%L)',
            'audit', 'fhir_rejected_submissions_' || month_str,
            month_start::TEXT, (month_start + INTERVAL '1 month')::DATE::TEXT);
    END LOOP;
END
$$;
-- ---------------------------------------------------------------------------
-- GRANTS — INSERT only for audit_writer, SELECT only for audit_reader
-- Applied to parent tables; PostgreSQL propagates to all partitions.
-- ---------------------------------------------------------------------------
-- audit_writer: INSERT only — no SELECT, UPDATE, DELETE, TRUNCATE.
-- This is the immutability mechanism: the application role physically
-- cannot alter or remove audit rows.
GRANT INSERT ON audit.audit_events TO audit_writer;
GRANT INSERT ON audit.fhir_rejected_submissions TO audit_writer;
-- audit_reader: SELECT only — for DGHS analytics and admin tools
GRANT SELECT ON audit.audit_events TO audit_reader;
GRANT SELECT ON audit.fhir_rejected_submissions TO audit_reader;
GRANT SELECT ON audit.schema_version TO audit_reader;
-- Sequences: audit_writer does not need sequence access because
-- event_id and submission_id are UUID generated by the application,
-- not database sequences.
-- ---------------------------------------------------------------------------
-- PARTITION MAINTENANCE FUNCTION
-- Callable by the cron job to create next month's partitions for both
-- audit.audit_events and audit.fhir_rejected_submissions.
-- Run on the 20th of each month to create the following month's partition.
--
-- FIXED: the original format() used an unqualified %I partition name, so
-- the new table was created in the caller's search_path schema (typically
-- public) while the existence check looked in the audit schema — the first
-- run put the partition in the wrong schema and every later run failed
-- with "relation already exists". The DDL is now schema-qualified (%I.%I)
-- and, as SECURITY DEFINER hardening, the function pins an empty
-- search_path (pg_catalog is always implicitly searched).
-- ---------------------------------------------------------------------------
CREATE OR REPLACE FUNCTION audit.create_next_month_partitions()
RETURNS void
LANGUAGE plpgsql
SECURITY DEFINER  -- runs as the function owner (postgres superuser)
SET search_path = ''  -- SECURITY DEFINER hardening: no search_path hijacking
AS $$
DECLARE
    next_month     DATE;  -- first day of the month to create
    month_after    DATE;  -- exclusive upper partition bound
    partition_name TEXT;  -- unqualified partition relation name
    month_str      TEXT;  -- 'YYYY_MM' name suffix
BEGIN
    next_month  := DATE_TRUNC('month', NOW()) + INTERVAL '1 month';
    month_after := next_month + INTERVAL '1 month';
    month_str   := TO_CHAR(next_month, 'YYYY_MM');

    -- audit_events partition
    partition_name := 'audit_events_' || month_str;
    IF NOT EXISTS (
        SELECT 1 FROM pg_class c
        JOIN pg_namespace n ON n.oid = c.relnamespace
        WHERE n.nspname = 'audit'
          AND c.relname = partition_name
    ) THEN
        -- Schema-qualify the new partition so it lands in audit, matching
        -- the existence check above.
        EXECUTE format(
            'CREATE TABLE %I.%I PARTITION OF audit.audit_events FOR VALUES FROM (%L) TO (%L)',
            'audit', partition_name,
            next_month::TEXT,
            month_after::TEXT
        );
        RAISE NOTICE 'Created partition: audit.%', partition_name;
    ELSE
        RAISE NOTICE 'Partition already exists: audit.%', partition_name;
    END IF;

    -- fhir_rejected_submissions partition
    partition_name := 'fhir_rejected_submissions_' || month_str;
    IF NOT EXISTS (
        SELECT 1 FROM pg_class c
        JOIN pg_namespace n ON n.oid = c.relnamespace
        WHERE n.nspname = 'audit'
          AND c.relname = partition_name
    ) THEN
        EXECUTE format(
            'CREATE TABLE %I.%I PARTITION OF audit.fhir_rejected_submissions FOR VALUES FROM (%L) TO (%L)',
            'audit', partition_name,
            next_month::TEXT,
            month_after::TEXT
        );
        RAISE NOTICE 'Created partition: audit.%', partition_name;
    ELSE
        RAISE NOTICE 'Partition already exists: audit.%', partition_name;
    END IF;
END;
$$;
-- Grant execute to a dedicated maintenance role (NOT to audit_writer —
-- the application role must not be able to create tables, even indirectly
-- through a SECURITY DEFINER function).
DO $$
BEGIN
    -- Idempotent creation: CREATE ROLE has no IF NOT EXISTS
    IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'audit_maintainer') THEN
        CREATE ROLE audit_maintainer NOLOGIN;
    END IF;
END
$$;
GRANT EXECUTE ON FUNCTION audit.create_next_month_partitions() TO audit_maintainer;
-- ---------------------------------------------------------------------------
-- VERIFICATION QUERIES (run manually after migration to confirm correctness)
-- ---------------------------------------------------------------------------
-- Confirm partition count (should be 36 per table for 2025-2027):
-- SELECT COUNT(*) FROM pg_inherits i
-- JOIN pg_class p ON p.oid = i.inhparent
-- JOIN pg_namespace n ON n.oid = p.relnamespace
-- WHERE n.nspname = 'audit' AND p.relname = 'audit_events';
-- Confirm INSERT-only grant for audit_writer:
-- SELECT grantee, table_name, privilege_type
-- FROM information_schema.role_table_grants
-- WHERE table_schema = 'audit' AND grantee = 'audit_writer';
-- Expected: only INSERT rows, no SELECT/UPDATE/DELETE.
-- Test partition routing:
-- INSERT INTO audit.audit_events (event_id, event_time, event_type, outcome, client_id, subject)
-- VALUES (gen_random_uuid(), NOW(), 'OPERATION', 'ACCEPTED', 'test', 'test');
-- SELECT tableoid::regclass, event_time FROM audit.audit_events LIMIT 1;
-- Should show audit.audit_events_YYYY_MM matching current month.

View File

@@ -0,0 +1,722 @@
-- =============================================================================
-- V1__hapi_schema.sql
-- HAPI FHIR 7.2.0 JPA Schema — PostgreSQL 15
--
-- MAINTAINER WARNING:
-- This file is the authoritative schema for the HAPI JPA store.
-- It was derived from HAPI 7.2.0 Hibernate entity mappings.
-- DO NOT MODIFY this file after it has run in any environment.
-- If HAPI is upgraded, write V3__hapi_schema_upgrade_X_Y_Z.sql.
--
-- To verify this schema matches a new HAPI version:
-- 1. Stand up HAPI with ddl-auto=create against a clean DB
-- 2. Dump schema: pg_dump --schema-only
-- 3. Diff against this file
-- 4. Write incremental migration for any differences
--
-- PARTITIONING NOTE:
-- HAPI JPA tables are NOT partitioned in this migration.
-- Partition candidates at 10M+ resources:
-- - HFJ_RESOURCE (partition by RES_TYPE or RES_UPDATED)
-- - HFJ_RES_VER (partition by RES_UPDATED)
-- - HFJ_SPIDX_STRING (partition by SP_UPDATED)
-- - HFJ_SPIDX_TOKEN (partition by SP_UPDATED)
-- - HFJ_SPIDX_DATE (partition by SP_LOW_VALUE)
-- At <10,000 resources/day (pilot phase), PostgreSQL B-tree indexes
-- are sufficient. Re-evaluate at 5M total resources.
-- =============================================================================
-- ---------------------------------------------------------------------------
-- SEQUENCES
-- One sequence per HAPI JPA entity family. INCREMENT BY 50 presumably matches
-- Hibernate's pooled ID allocation size — TODO confirm against the HAPI 7.2.0
-- entity mappings before changing; a mismatch causes duplicate-key errors.
-- ---------------------------------------------------------------------------
CREATE SEQUENCE IF NOT EXISTS hfj_resource_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_res_ver_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_history_tag_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_res_tag_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_forced_id_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_search_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_search_result_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_subscription_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_tag_def_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_res_link_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_spidx_string_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_spidx_token_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_spidx_number_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_spidx_quantity_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_spidx_date_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_spidx_uri_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_spidx_coords_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_concept_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_concept_map_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_concept_map_grp_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_concept_map_grp_elm_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_concept_map_grp_elm_tgt_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_valueset_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_valueset_concept_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS npm_package_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS npm_package_ver_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS npm_package_ver_res_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_batch2_job_inst_seq START WITH 1 INCREMENT BY 50;
CREATE SEQUENCE IF NOT EXISTS hfj_batch2_wrkchunk_seq START WITH 1 INCREMENT BY 50;
-- ---------------------------------------------------------------------------
-- TAG DEFINITIONS
-- Stores all tag values referenced by resources (security, profile, general)
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS HFJ_TAG_DEF (
    TAG_ID BIGINT NOT NULL DEFAULT nextval('hfj_tag_def_seq'),
    TAG_CODE VARCHAR(200) NOT NULL,
    TAG_DISPLAY VARCHAR(200),
    TAG_SYSTEM VARCHAR(200),  -- nullable: tags without a system are allowed
    TAG_TYPE SMALLINT NOT NULL,
    CONSTRAINT PK_HFJ_TAG_DEF PRIMARY KEY (TAG_ID)
);
-- NOTE(review): TAG_SYSTEM is nullable; with default index semantics two rows
-- with NULL system and the same type/code are both accepted (NULLs are
-- distinct in unique indexes unless NULLS NOT DISTINCT is used on PG 15+).
-- Confirm whether duplicate system-less tags are acceptable.
CREATE UNIQUE INDEX IF NOT EXISTS IDX_TAGDEF_TYPESYSCODE
    ON HFJ_TAG_DEF (TAG_TYPE, TAG_SYSTEM, TAG_CODE);
-- ---------------------------------------------------------------------------
-- CORE RESOURCE TABLE
-- One row per logical resource (not per version).
-- RES_VER column tracks current version number.
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS HFJ_RESOURCE (
    RES_ID BIGINT NOT NULL DEFAULT nextval('hfj_resource_seq'),
    RES_TYPE VARCHAR(40) NOT NULL,        -- FHIR resource type name (e.g. Patient)
    RES_VERSION VARCHAR(7) NOT NULL,      -- FHIR version: R4
    RES_ENCODING_ENUM VARCHAR(11),
    RES_DELETED_AT TIMESTAMP WITH TIME ZONE,   -- non-NULL marks logical delete
    RES_LAST_UPDATED TIMESTAMP WITH TIME ZONE NOT NULL,
    RES_PUBLISHED TIMESTAMP WITH TIME ZONE NOT NULL,
    RES_VER BIGINT NOT NULL,              -- current version number of this resource
    FHIR_ID VARCHAR(64),
    HAS_TAGS BOOLEAN NOT NULL DEFAULT FALSE,
    SP_HAS_LINKS BOOLEAN NOT NULL DEFAULT FALSE,
    HASH_SHA256 VARCHAR(64),
    RES_TITLE VARCHAR(200),
    CONSTRAINT PK_HFJ_RESOURCE PRIMARY KEY (RES_ID)
);
-- Lookup by type + client-visible logical id.
CREATE INDEX IF NOT EXISTS IDX_RES_TYPE_FHIRID
    ON HFJ_RESOURCE (RES_TYPE, FHIR_ID);
-- Supports _lastUpdated searches and incremental extraction.
CREATE INDEX IF NOT EXISTS IDX_RES_UPDATED
    ON HFJ_RESOURCE (RES_LAST_UPDATED);
CREATE INDEX IF NOT EXISTS IDX_RES_TYPE_UPDATED
    ON HFJ_RESOURCE (RES_TYPE, RES_LAST_UPDATED);
-- Partial index on content hash (non-NULL rows only).
-- NOTE(review): an earlier comment here claimed this index supports
-- _tag=...|unvalidated-profile searches — it does not; tag searches resolve
-- through HFJ_RES_TAG / HFJ_TAG_DEF. This index only serves HASH_SHA256
-- lookups (e.g. duplicate-content detection). Verify which was intended.
CREATE INDEX IF NOT EXISTS IDX_RES_HASH
    ON HFJ_RESOURCE (HASH_SHA256)
    WHERE HASH_SHA256 IS NOT NULL;
-- ---------------------------------------------------------------------------
-- FORCED IDS
-- Maps client-assigned logical IDs to internal numeric IDs
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS HFJ_FORCED_ID (
    FORCEDID_ID BIGINT NOT NULL DEFAULT nextval('hfj_forced_id_seq'),
    RESOURCE_PID BIGINT NOT NULL,
    -- NOTE(review): FHIR_ID here is VARCHAR(100) while HFJ_RESOURCE.FHIR_ID
    -- is VARCHAR(64) — confirm the intended maximum logical-id length and
    -- align the two columns.
    FHIR_ID VARCHAR(100) NOT NULL,
    RES_TYPE VARCHAR(40),
    CONSTRAINT PK_HFJ_FORCED_ID PRIMARY KEY (FORCEDID_ID),
    CONSTRAINT FK_FORCEDID_RESOURCE FOREIGN KEY (RESOURCE_PID)
        REFERENCES HFJ_RESOURCE (RES_ID)
);
-- One forced id per (type, logical id); one forced id per resource row.
CREATE UNIQUE INDEX IF NOT EXISTS IDX_FORCEDID_TYPE_FID
    ON HFJ_FORCED_ID (RES_TYPE, FHIR_ID);
CREATE UNIQUE INDEX IF NOT EXISTS IDX_FORCEDID_RESID
    ON HFJ_FORCED_ID (RESOURCE_PID);
-- ---------------------------------------------------------------------------
-- RESOURCE VERSIONS
-- Full resource content stored here. One row per version per resource.
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS HFJ_RES_VER (
    PID BIGINT NOT NULL DEFAULT nextval('hfj_res_ver_seq'),
    RES_ID BIGINT NOT NULL,
    RES_VER BIGINT NOT NULL,              -- version number within the resource
    RES_TYPE VARCHAR(40) NOT NULL,
    RES_ENCODING VARCHAR(11) NOT NULL,
    RES_TEXT TEXT,                        -- serialized resource body
    RES_TEXT_VC TEXT,
    RES_DELETED_AT TIMESTAMP WITH TIME ZONE,
    RES_LAST_UPDATED TIMESTAMP WITH TIME ZONE NOT NULL,
    RES_PUBLISHED TIMESTAMP WITH TIME ZONE NOT NULL,
    SOURCE_URI VARCHAR(100),
    REQUEST_ID VARCHAR(16),               -- correlates a version to the request that wrote it
    RES_TITLE VARCHAR(200),
    CONSTRAINT PK_HFJ_RES_VER PRIMARY KEY (PID),
    CONSTRAINT FK_RESVER_RES FOREIGN KEY (RES_ID)
        REFERENCES HFJ_RESOURCE (RES_ID)
);
-- Exactly one row per (resource, version) pair; also serves vread lookups.
CREATE UNIQUE INDEX IF NOT EXISTS IDX_RESVER_ID_VER
    ON HFJ_RES_VER (RES_ID, RES_VER);
CREATE INDEX IF NOT EXISTS IDX_RESVER_UPDATED
    ON HFJ_RES_VER (RES_LAST_UPDATED);
-- ---------------------------------------------------------------------------
-- RESOURCE TAGS
-- Join tables linking resources (and resource versions) to HFJ_TAG_DEF rows.
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS HFJ_RES_TAG (
    TAG_ID BIGINT NOT NULL DEFAULT nextval('hfj_res_tag_seq'),
    RES_ID BIGINT NOT NULL,
    TAG_DEFID BIGINT NOT NULL,
    RES_TYPE VARCHAR(40),
    CONSTRAINT PK_HFJ_RES_TAG PRIMARY KEY (TAG_ID),
    CONSTRAINT FK_RESTAG_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID),
    CONSTRAINT FK_RESTAG_TAGDEF FOREIGN KEY (TAG_DEFID) REFERENCES HFJ_TAG_DEF (TAG_ID)
);
-- A tag may appear at most once per resource.
CREATE UNIQUE INDEX IF NOT EXISTS IDX_RES_TAG
    ON HFJ_RES_TAG (RES_ID, TAG_DEFID);
-- Per-version tag associations (history).
CREATE TABLE IF NOT EXISTS HFJ_HISTORY_TAG (
    TAG_ID BIGINT NOT NULL DEFAULT nextval('hfj_history_tag_seq'),
    RES_VER_PID BIGINT NOT NULL,
    RES_TYPE VARCHAR(40),
    -- NOTE(review): RES_ID here carries no FK to HFJ_RESOURCE (unlike
    -- HFJ_RES_TAG.RES_ID) — confirm whether that is intentional denormalization.
    RES_ID BIGINT NOT NULL,
    TAG_DEFID BIGINT NOT NULL,
    CONSTRAINT PK_HFJ_HISTORY_TAG PRIMARY KEY (TAG_ID),
    CONSTRAINT FK_HISTTAG_RESVER FOREIGN KEY (RES_VER_PID) REFERENCES HFJ_RES_VER (PID),
    CONSTRAINT FK_HISTTAG_TAGDEF FOREIGN KEY (TAG_DEFID) REFERENCES HFJ_TAG_DEF (TAG_ID)
);
-- ---------------------------------------------------------------------------
-- RESOURCE LINKS (reference index)
-- One row per resolved (or external-URL) reference from a source resource.
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS HFJ_RES_LINK (
    PID BIGINT NOT NULL DEFAULT nextval('hfj_res_link_seq'),
    SRC_RESOURCE_ID BIGINT NOT NULL,
    SRC_PATH VARCHAR(200) NOT NULL,            -- FHIRPath of the reference element
    TARGET_RESOURCE_ID BIGINT,                 -- NULL when the target is an external URL
    TARGET_RESOURCE_TYPE VARCHAR(40),
    TARGET_RESOURCE_URL VARCHAR(200),
    UPDATED TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_USERAGENT VARCHAR(200),
    CONSTRAINT PK_HFJ_RES_LINK PRIMARY KEY (PID),
    CONSTRAINT FK_RESLINK_SRC FOREIGN KEY (SRC_RESOURCE_ID)
        REFERENCES HFJ_RESOURCE (RES_ID)
);
CREATE INDEX IF NOT EXISTS IDX_RL_TPATHRES ON HFJ_RES_LINK (SRC_PATH, SRC_RESOURCE_ID);
-- Reverse-reference lookups (_revinclude, referential-integrity checks).
CREATE INDEX IF NOT EXISTS IDX_RL_TARGET ON HFJ_RES_LINK (TARGET_RESOURCE_ID);
-- ---------------------------------------------------------------------------
-- SEARCH PARAMETER INDEXES
-- One table per data type. HAPI uses these for FHIR search queries.
-- Rows are written by the indexing pipeline on every resource write.
-- ---------------------------------------------------------------------------
-- String parameters (name, address, etc.)
CREATE TABLE IF NOT EXISTS HFJ_SPIDX_STRING (
    SP_ID BIGINT NOT NULL DEFAULT nextval('hfj_spidx_string_seq'),
    RES_ID BIGINT NOT NULL,
    SP_NAME VARCHAR(100) NOT NULL,        -- search parameter name
    RES_TYPE VARCHAR(40) NOT NULL,
    SP_UPDATED TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_VALUE_EXACT VARCHAR(200),          -- original-case value (:exact matches)
    SP_VALUE_NORM VARCHAR(200),           -- normalized value (default string matching)
    HASH_IDENTITY BIGINT,
    HASH_EXACT BIGINT,
    HASH_NORM_PREFIX BIGINT,
    CONSTRAINT PK_HFJ_SPIDX_STRING PRIMARY KEY (SP_ID),
    CONSTRAINT FK_SPIDXSTR_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID)
);
CREATE INDEX IF NOT EXISTS IDX_SP_STRING_HASH_NRM ON HFJ_SPIDX_STRING (HASH_NORM_PREFIX, SP_VALUE_NORM);
CREATE INDEX IF NOT EXISTS IDX_SP_STRING_HASH_EXCT ON HFJ_SPIDX_STRING (HASH_EXACT);
-- RES_ID index supports reindex/delete of a single resource's rows.
CREATE INDEX IF NOT EXISTS IDX_SP_STRING_RESID ON HFJ_SPIDX_STRING (RES_ID);
-- Token parameters (code, identifier, status, etc.)
-- Most heavily used for ICD-11 code searches
CREATE TABLE IF NOT EXISTS HFJ_SPIDX_TOKEN (
    SP_ID BIGINT NOT NULL DEFAULT nextval('hfj_spidx_token_seq'),
    RES_ID BIGINT NOT NULL,
    SP_NAME VARCHAR(100) NOT NULL,
    RES_TYPE VARCHAR(40) NOT NULL,
    SP_UPDATED TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_SYSTEM VARCHAR(200),
    SP_VALUE VARCHAR(200),
    HASH_IDENTITY BIGINT,
    HASH_SYS_AND_VALUE BIGINT,            -- system|value searches
    HASH_VALUE BIGINT,                    -- bare-code searches
    HASH_SYSTEM BIGINT,                   -- system-only (system|) searches
    CONSTRAINT PK_HFJ_SPIDX_TOKEN PRIMARY KEY (SP_ID),
    CONSTRAINT FK_SPIDXTOK_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID)
);
CREATE INDEX IF NOT EXISTS IDX_SP_TOKEN_HASH ON HFJ_SPIDX_TOKEN (HASH_IDENTITY);
CREATE INDEX IF NOT EXISTS IDX_SP_TOKEN_HASH_SV ON HFJ_SPIDX_TOKEN (HASH_SYS_AND_VALUE);
CREATE INDEX IF NOT EXISTS IDX_SP_TOKEN_HASH_V ON HFJ_SPIDX_TOKEN (HASH_VALUE);
CREATE INDEX IF NOT EXISTS IDX_SP_TOKEN_RESID ON HFJ_SPIDX_TOKEN (RES_ID);
-- Date parameters (birthdate, recorded, onset, etc.)
-- Stored as a [low, high] range so both instants and periods index uniformly.
CREATE TABLE IF NOT EXISTS HFJ_SPIDX_DATE (
    SP_ID BIGINT NOT NULL DEFAULT nextval('hfj_spidx_date_seq'),
    RES_ID BIGINT NOT NULL,
    SP_NAME VARCHAR(100) NOT NULL,
    RES_TYPE VARCHAR(40) NOT NULL,
    SP_UPDATED TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_LOW_VALUE TIMESTAMP WITH TIME ZONE,
    SP_HIGH_VALUE TIMESTAMP WITH TIME ZONE,
    HASH_IDENTITY BIGINT,
    CONSTRAINT PK_HFJ_SPIDX_DATE PRIMARY KEY (SP_ID),
    CONSTRAINT FK_SPIDXDATE_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID)
);
CREATE INDEX IF NOT EXISTS IDX_SP_DATE_HASH ON HFJ_SPIDX_DATE (HASH_IDENTITY);
CREATE INDEX IF NOT EXISTS IDX_SP_DATE_HASH_LOW ON HFJ_SPIDX_DATE (HASH_IDENTITY, SP_LOW_VALUE);
CREATE INDEX IF NOT EXISTS IDX_SP_DATE_HASH_HIGH ON HFJ_SPIDX_DATE (HASH_IDENTITY, SP_HIGH_VALUE);
CREATE INDEX IF NOT EXISTS IDX_SP_DATE_RESID ON HFJ_SPIDX_DATE (RES_ID);
-- Number parameters
CREATE TABLE IF NOT EXISTS HFJ_SPIDX_NUMBER (
    SP_ID BIGINT NOT NULL DEFAULT nextval('hfj_spidx_number_seq'),
    RES_ID BIGINT NOT NULL,
    SP_NAME VARCHAR(100) NOT NULL,
    RES_TYPE VARCHAR(40) NOT NULL,
    SP_UPDATED TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_VALUE NUMERIC(19,9),
    HASH_IDENTITY BIGINT,
    CONSTRAINT PK_HFJ_SPIDX_NUMBER PRIMARY KEY (SP_ID),
    CONSTRAINT FK_SPIDXNUM_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID)
);
CREATE INDEX IF NOT EXISTS IDX_SP_NUMBER_HASH_VAL ON HFJ_SPIDX_NUMBER (HASH_IDENTITY, SP_VALUE);
-- Quantity parameters (Observation.value, etc.)
CREATE TABLE IF NOT EXISTS HFJ_SPIDX_QUANTITY (
    SP_ID BIGINT NOT NULL DEFAULT nextval('hfj_spidx_quantity_seq'),
    RES_ID BIGINT NOT NULL,
    SP_NAME VARCHAR(100) NOT NULL,
    RES_TYPE VARCHAR(40) NOT NULL,
    SP_UPDATED TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_VALUE NUMERIC(19,9),
    SP_SYSTEM VARCHAR(200),               -- unit system (e.g. UCUM URL)
    SP_UNITS VARCHAR(200),
    HASH_IDENTITY BIGINT,
    HASH_SYS_UNITS_VAL BIGINT,
    HASH_UNITS_VAL BIGINT,
    CONSTRAINT PK_HFJ_SPIDX_QUANTITY PRIMARY KEY (SP_ID),
    CONSTRAINT FK_SPIDXQTY_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID)
);
CREATE INDEX IF NOT EXISTS IDX_SP_QUANTITY_HASH ON HFJ_SPIDX_QUANTITY (HASH_IDENTITY);
CREATE INDEX IF NOT EXISTS IDX_SP_QUANTITY_HASH_UN ON HFJ_SPIDX_QUANTITY (HASH_UNITS_VAL);
CREATE INDEX IF NOT EXISTS IDX_SP_QUANTITY_HASH_SUVAL ON HFJ_SPIDX_QUANTITY (HASH_SYS_UNITS_VAL);
-- URI parameters (url, instantiates, etc.)
CREATE TABLE IF NOT EXISTS HFJ_SPIDX_URI (
    SP_ID BIGINT NOT NULL DEFAULT nextval('hfj_spidx_uri_seq'),
    RES_ID BIGINT NOT NULL,
    SP_NAME VARCHAR(100) NOT NULL,
    RES_TYPE VARCHAR(40) NOT NULL,
    SP_UPDATED TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_URI VARCHAR(254),
    HASH_IDENTITY BIGINT,
    HASH_URI BIGINT,
    CONSTRAINT PK_HFJ_SPIDX_URI PRIMARY KEY (SP_ID),
    CONSTRAINT FK_SPIDXURI_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_SPIDX_URI_CHECK_RES (RES_ID)
);
CREATE INDEX IF NOT EXISTS IDX_SP_URI_HASH_URI ON HFJ_SPIDX_URI (HASH_URI);
CREATE INDEX IF NOT EXISTS IDX_SP_URI_RESID ON HFJ_SPIDX_URI (RES_ID);
-- Coordinate parameters (Location.position)
-- NOTE(review): no secondary index beyond the PK — near-distance queries will
-- sequential-scan; acceptable at pilot volume, revisit with geo workload.
CREATE TABLE IF NOT EXISTS HFJ_SPIDX_COORDS (
    SP_ID BIGINT NOT NULL DEFAULT nextval('hfj_spidx_coords_seq'),
    RES_ID BIGINT NOT NULL,
    SP_NAME VARCHAR(100) NOT NULL,
    RES_TYPE VARCHAR(40) NOT NULL,
    SP_UPDATED TIMESTAMP WITH TIME ZONE NOT NULL,
    SP_LATITUDE DOUBLE PRECISION,
    SP_LONGITUDE DOUBLE PRECISION,
    HASH_IDENTITY BIGINT,
    CONSTRAINT PK_HFJ_SPIDX_COORDS PRIMARY KEY (SP_ID),
    CONSTRAINT FK_SPIDXCOORDS_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID)
);
-- ---------------------------------------------------------------------------
-- SEARCH RESULTS (paging)
-- Cached search descriptors plus their materialized result-id lists;
-- rows are expired/reaped by HAPI's stale-search cleanup.
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS HFJ_SEARCH (
    PID BIGINT NOT NULL DEFAULT nextval('hfj_search_seq'),
    SEARCH_UUID VARCHAR(36) NOT NULL,          -- opaque id embedded in paging links
    RESOURCE_TYPE VARCHAR(200),
    SEARCH_TYPE SMALLINT NOT NULL,
    SEARCH_STATUS VARCHAR(10) NOT NULL,
    CREATED TIMESTAMP WITH TIME ZONE NOT NULL,
    EXPIRY_OR_NULL TIMESTAMP WITH TIME ZONE,
    TOTAL_COUNT INT,
    NUM_FOUND INT,
    SEARCH_PARAM_MAP BYTEA,                    -- serialized parameter map
    SEARCH_LAST_RETURNED TIMESTAMP WITH TIME ZONE,
    SEARCH_QUERY_STRING TEXT,
    SEARCH_QUERY_STRING_HASH INT,
    PREFERRED_PAGE_SIZE INT,
    FAILURE_CODE INT,
    FAILURE_MESSAGE TEXT,
    SEARCH_DELETED BOOLEAN,
    LAST_UPDATED_HIGH TIMESTAMP WITH TIME ZONE,
    LAST_UPDATED_LOW TIMESTAMP WITH TIME ZONE,
    OPT_LOCK_VERSION INT,                      -- optimistic-locking version counter
    CONSTRAINT PK_HFJ_SEARCH PRIMARY KEY (PID)
);
CREATE UNIQUE INDEX IF NOT EXISTS IDX_SEARCH_UUID ON HFJ_SEARCH (SEARCH_UUID);
-- Used by the stale-search reaper to find expirable searches.
CREATE INDEX IF NOT EXISTS IDX_SEARCH_LASTRETURNED ON HFJ_SEARCH (SEARCH_LAST_RETURNED);
CREATE INDEX IF NOT EXISTS IDX_SEARCH_RESTYPE ON HFJ_SEARCH (RESOURCE_TYPE);
-- Ordered resource ids belonging to a cached search page set.
CREATE TABLE IF NOT EXISTS HFJ_SEARCH_RESULT (
    PID BIGINT NOT NULL DEFAULT nextval('hfj_search_result_seq'),
    SEARCH_PID BIGINT NOT NULL,
    RES_ID BIGINT NOT NULL,
    ORDER_NUM INT NOT NULL,                    -- position within the result set
    CONSTRAINT PK_HFJ_SEARCH_RESULT PRIMARY KEY (PID),
    CONSTRAINT FK_SEARCHRES_SEARCH FOREIGN KEY (SEARCH_PID) REFERENCES HFJ_SEARCH (PID)
);
CREATE UNIQUE INDEX IF NOT EXISTS IDX_SEARCHRES_ORDER
    ON HFJ_SEARCH_RESULT (SEARCH_PID, ORDER_NUM);
CREATE INDEX IF NOT EXISTS IDX_SEARCHRES_RESID
    ON HFJ_SEARCH_RESULT (SEARCH_PID, RES_ID);
-- ---------------------------------------------------------------------------
-- SUBSCRIPTIONS
-- Delivery counters per Subscription resource (one row per subscription).
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS HFJ_SUBSCRIPTION_STATS (
    PID BIGINT NOT NULL DEFAULT nextval('hfj_subscription_seq'),
    RES_ID BIGINT NOT NULL,                          -- the Subscription resource
    CREATED_TIME TIMESTAMP WITH TIME ZONE NOT NULL,
    DELIVERY_FAILED_COUNT INT NOT NULL DEFAULT 0,
    DELIVERY_ORPHANED_COUNT INT NOT NULL DEFAULT 0,
    DELIVERY_SUCCESS_COUNT INT NOT NULL DEFAULT 0,
    CONSTRAINT PK_HFJ_SUBSCRIPTION_STATS PRIMARY KEY (PID),
    CONSTRAINT FK_SUBSC_RES FOREIGN KEY (RES_ID) REFERENCES HFJ_RESOURCE (RES_ID)
);
CREATE UNIQUE INDEX IF NOT EXISTS IDX_SUBSC_RESID ON HFJ_SUBSCRIPTION_STATS (RES_ID);
-- ---------------------------------------------------------------------------
-- NPM PACKAGES (IG storage)
-- HAPI writes IG package metadata here on first load.
-- IgPackageInitializer uses advisory lock to prevent race condition
-- on multi-replica startup.
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS NPM_PACKAGE (
    PID BIGINT NOT NULL DEFAULT nextval('npm_package_seq'),
    PACKAGE_ID VARCHAR(200) NOT NULL,     -- npm package name, e.g. bd.gov.dghs.core
    CUR_VERSION_ID BIGINT,                -- points at the current NPM_PACKAGE_VER row
    UPDATED_TIME TIMESTAMP WITH TIME ZONE NOT NULL,
    CONSTRAINT PK_NPM_PACKAGE PRIMARY KEY (PID)
);
CREATE UNIQUE INDEX IF NOT EXISTS IDX_NPMPACKAGE_PKGID ON NPM_PACKAGE (PACKAGE_ID);
-- One row per installed version of a package.
CREATE TABLE IF NOT EXISTS NPM_PACKAGE_VER (
    PID BIGINT NOT NULL DEFAULT nextval('npm_package_ver_seq'),
    PACKAGE_ID VARCHAR(200) NOT NULL,
    VERSION_ID VARCHAR(200) NOT NULL,
    PKG_PID BIGINT NOT NULL,
    PACKAGE_DESC VARCHAR(200),
    FHIR_VERSION VARCHAR(10) NOT NULL,
    FHIR_VERSION_ID SMALLINT NOT NULL,
    CURRENT_VERSION BOOLEAN,
    UPDATED_TIME TIMESTAMP WITH TIME ZONE NOT NULL,
    INSTALLED_SIZE BIGINT,
    CONSTRAINT PK_NPM_PACKAGE_VER PRIMARY KEY (PID),
    CONSTRAINT FK_NPM_PKG_VER_PKG FOREIGN KEY (PKG_PID) REFERENCES NPM_PACKAGE (PID)
);
CREATE UNIQUE INDEX IF NOT EXISTS IDX_NPMPACKAGEVER_URL
    ON NPM_PACKAGE_VER (PACKAGE_ID, VERSION_ID);
CREATE INDEX IF NOT EXISTS IDX_NPMPACKAGEVER_PKGID
    ON NPM_PACKAGE_VER (PKG_PID);
-- One row per conformance resource contained in an installed package version
-- (StructureDefinitions, ValueSets, etc. extracted from the IG tarball).
CREATE TABLE IF NOT EXISTS NPM_PACKAGE_VER_RES (
    PID BIGINT NOT NULL DEFAULT nextval('npm_package_ver_res_seq'),
    PACKVER_PID BIGINT NOT NULL,
    RES_TYPE VARCHAR(40) NOT NULL,
    FHIR_ID VARCHAR(64) NOT NULL,
    FHIR_VERSION VARCHAR(10),
    FHIR_VERSION_ID SMALLINT,
    RES_VERSIONLESS_ID VARCHAR(200),
    FILE_DIR VARCHAR(200),
    FILE_NAME VARCHAR(200),
    RES_SIZE_BYTES BIGINT,
    CANONICAL_URL VARCHAR(200),           -- canonical URL used for profile resolution
    CANONICAL_VERSION VARCHAR(200),
    -- FIX: was MEDIUMTEXT, which is a MySQL type and a syntax error on
    -- PostgreSQL 15 — the whole migration would abort here. TEXT is the
    -- PostgreSQL equivalent (unbounded variable-length string).
    RES_TEXT TEXT,
    CONSTRAINT PK_NPM_PKG_VER_RES PRIMARY KEY (PID),
    CONSTRAINT FK_NPM_PKG_VER_RES_PKG FOREIGN KEY (PACKVER_PID)
        REFERENCES NPM_PACKAGE_VER (PID)
);
CREATE INDEX IF NOT EXISTS IDX_PACKVERRES_PACKVER ON NPM_PACKAGE_VER_RES (PACKVER_PID);
-- Canonical-URL lookup is the hot path during validation (profile resolution).
CREATE INDEX IF NOT EXISTS IDX_PACKVERRES_CANONICAL ON NPM_PACKAGE_VER_RES (CANONICAL_URL);
CREATE INDEX IF NOT EXISTS IDX_PACKVERRES_TYPE ON NPM_PACKAGE_VER_RES (RES_TYPE, FHIR_ID);
-- ---------------------------------------------------------------------------
-- TERMINOLOGY TABLES
-- ConceptMap, ValueSet expansion cache, Concept definitions
-- Note: BD Core IG uses OCL as terminology authority — these tables
-- are used by HAPI's internal terminology infrastructure, not for
-- primary ICD-11 storage.
-- ---------------------------------------------------------------------------
-- Code system registry. No sequence default on PID: ids are assigned by the
-- application layer.
CREATE TABLE IF NOT EXISTS TRM_CODESYSTEM (
    PID BIGINT NOT NULL,
    RES_ID BIGINT,
    CS_NAME VARCHAR(200),
    CS_URI VARCHAR(200),
    CS_VERSION VARCHAR(200),
    CURRENT_VERSION_PID BIGINT,
    CODESYSTEM_PID BIGINT,
    CONSTRAINT PK_TRM_CODESYSTEM PRIMARY KEY (PID)
);
-- NOTE(review): unique index on CS_NAME but not CS_URI — canonical identity
-- of a CodeSystem is its URI; confirm the intended uniqueness column.
CREATE UNIQUE INDEX IF NOT EXISTS IDX_CS_NAME ON TRM_CODESYSTEM (CS_NAME);
CREATE TABLE IF NOT EXISTS TRM_CODESYSTEM_VER (
    PID BIGINT NOT NULL,
    CS_PID BIGINT NOT NULL,
    RES_ID BIGINT,
    CS_VERSION_ID VARCHAR(200),
    CURRENT_VERSION BOOLEAN,
    CS_DISPLAY_NAME VARCHAR(200),
    CONSTRAINT PK_TRM_CODESYSTEM_VER PRIMARY KEY (PID),
    CONSTRAINT FK_CODESYSVER_CS FOREIGN KEY (CS_PID) REFERENCES TRM_CODESYSTEM (PID)
);
-- Individual concepts; CODESYSTEM_PID references a code system VERSION row.
CREATE TABLE IF NOT EXISTS TRM_CONCEPT (
    PID BIGINT NOT NULL DEFAULT nextval('hfj_concept_seq'),
    CODESYSTEM_PID BIGINT NOT NULL,
    CODE VARCHAR(500) NOT NULL,
    DISPLAY VARCHAR(400),
    PARENT_PIDS TEXT,                     -- serialized list of parent concept pids
    CODE_SEQUENCE INT,
    INDEX_STATUS BIGINT,
    CONCEPT_UPDATED TIMESTAMP WITH TIME ZONE,
    HASH_CODE BIGINT,
    CONSTRAINT PK_TRM_CONCEPT PRIMARY KEY (PID),
    CONSTRAINT FK_CONCEPT_PID_CS FOREIGN KEY (CODESYSTEM_PID)
        REFERENCES TRM_CODESYSTEM_VER (PID)
);
CREATE INDEX IF NOT EXISTS IDX_CONCEPT_CODESYSTEM ON TRM_CONCEPT (CODESYSTEM_PID);
CREATE INDEX IF NOT EXISTS IDX_CONCEPT_UPDATED ON TRM_CONCEPT (CONCEPT_UPDATED);
-- A code is unique within one code system version.
CREATE UNIQUE INDEX IF NOT EXISTS IDX_CONCEPT_CS_CODE
    ON TRM_CONCEPT (CODESYSTEM_PID, CODE);
-- ConceptMap hierarchy: map -> group -> element -> target.
CREATE TABLE IF NOT EXISTS TRM_CONCEPT_MAP (
    PID BIGINT NOT NULL DEFAULT nextval('hfj_concept_map_seq'),
    RES_ID BIGINT,
    CONCEPT_MAP_URL VARCHAR(200),
    CM_VERSION VARCHAR(200),
    SOURCE_VS VARCHAR(200),
    TARGET_VS VARCHAR(200),
    CONSTRAINT PK_TRM_CONCEPT_MAP PRIMARY KEY (PID)
);
CREATE TABLE IF NOT EXISTS TRM_CONCEPT_MAP_GROUP (
    PID BIGINT NOT NULL DEFAULT nextval('hfj_concept_map_grp_seq'),
    MAP_PID BIGINT NOT NULL,
    SOURCE_CS VARCHAR(200),
    SOURCE_VER VARCHAR(200),
    TARGET_CS VARCHAR(200),
    TARGET_VER VARCHAR(200),
    MAP_ORDER INT,
    CONSTRAINT PK_TRM_CONCEPT_MAP_GROUP PRIMARY KEY (PID),
    CONSTRAINT FK_CMG_MAP FOREIGN KEY (MAP_PID) REFERENCES TRM_CONCEPT_MAP (PID)
);
CREATE TABLE IF NOT EXISTS TRM_CONCEPT_MAP_GRP_ELEMENT (
    PID BIGINT NOT NULL DEFAULT nextval('hfj_concept_map_grp_elm_seq'),
    GROUP_PID BIGINT NOT NULL,
    SOURCE_CODE VARCHAR(500) NOT NULL,
    SOURCE_DISPLAY VARCHAR(400),
    SYSTEM_VERSION VARCHAR(200),
    CONSTRAINT PK_TRM_CONCEPT_MAP_GRP_ELEMENT PRIMARY KEY (PID),
    CONSTRAINT FK_CMEL_GRP FOREIGN KEY (GROUP_PID) REFERENCES TRM_CONCEPT_MAP_GROUP (PID)
);
CREATE TABLE IF NOT EXISTS TRM_CONCEPT_MAP_GRP_ELM_TGT (
    PID BIGINT NOT NULL DEFAULT nextval('hfj_concept_map_grp_elm_tgt_seq'),
    ELEMENT_PID BIGINT NOT NULL,
    TARGET_CODE VARCHAR(500),
    TARGET_DISPLAY VARCHAR(400),
    TARGET_CODE_SYS_VER VARCHAR(200),
    TARGET_EQUIVALENCE VARCHAR(50),       -- ConceptMap equivalence code
    VALUESET_ORDER INT,
    HASH_IDENTITY BIGINT,
    CONSTRAINT PK_TRM_CONCEPT_MAP_GRP_ELM_TGT PRIMARY KEY (PID),
    CONSTRAINT FK_CMELTGT_ELM FOREIGN KEY (ELEMENT_PID)
        REFERENCES TRM_CONCEPT_MAP_GRP_ELEMENT (PID)
);
-- ValueSet expansion cache header + member concepts.
CREATE TABLE IF NOT EXISTS TRM_VALUESET (
    PID BIGINT NOT NULL DEFAULT nextval('hfj_valueset_seq'),
    EXPANSION_STATUS VARCHAR(50) NOT NULL,
    VS_NAME VARCHAR(200),
    VS_URL VARCHAR(200) NOT NULL,
    VS_VERSION VARCHAR(200),
    TOTAL_CONCEPT_COUNT INT,
    EXPAN_ID VARCHAR(200),
    UPDATED_TIMESTAMP TIMESTAMP WITH TIME ZONE,
    HASH_IDENTITY BIGINT,
    CONSTRAINT PK_TRM_VALUESET PRIMARY KEY (PID)
);
CREATE UNIQUE INDEX IF NOT EXISTS IDX_VS_HASH ON TRM_VALUESET (HASH_IDENTITY);
CREATE TABLE IF NOT EXISTS TRM_VALUESET_CONCEPT (
    PID BIGINT NOT NULL DEFAULT nextval('hfj_valueset_concept_seq'),
    VALUESET_PID BIGINT NOT NULL,
    SYSTEM_URL VARCHAR(200) NOT NULL,
    SYSTEM_VER VARCHAR(200),
    CODEVAL VARCHAR(500) NOT NULL,
    DISPLAY VARCHAR(400),
    INDEX_STATUS BIGINT,
    SOURCE_PID BIGINT,
    VS_CONCEPT_UPDATED TIMESTAMP WITH TIME ZONE,
    HASH_CODEVAL BIGINT,
    HASH_SYS_AND_CODEVAL BIGINT,
    CONSTRAINT PK_TRM_VALUESET_CONCEPT PRIMARY KEY (PID),
    CONSTRAINT FK_TRM_VALUESET_PID FOREIGN KEY (VALUESET_PID) REFERENCES TRM_VALUESET (PID)
);
CREATE INDEX IF NOT EXISTS IDX_VSCON_VS ON TRM_VALUESET_CONCEPT (VALUESET_PID);
-- Partial unique index: membership check by hashed code within an expansion.
CREATE UNIQUE INDEX IF NOT EXISTS IDX_VSCON_HASHCODEVAL
    ON TRM_VALUESET_CONCEPT (VALUESET_PID, HASH_CODEVAL)
    WHERE HASH_CODEVAL IS NOT NULL;
-- ---------------------------------------------------------------------------
-- BATCH2 JOB INFRASTRUCTURE (HAPI 7.x)
-- Required for bulk export, $reindex, terminology import operations.
-- Jobs (HFJ_BATCH2_JOB_INST) fan out into work chunks (HFJ_BATCH2_WORK_CHUNK).
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS HFJ_BATCH2_JOB_INST (
    ID VARCHAR(36) NOT NULL,                   -- UUID assigned by the job coordinator
    CREATE_TIME TIMESTAMP WITH TIME ZONE NOT NULL,
    START_TIME TIMESTAMP WITH TIME ZONE,
    END_TIME TIMESTAMP WITH TIME ZONE,
    STAT VARCHAR(20) NOT NULL,                 -- job status code
    JOB_DEFN_ID VARCHAR(100) NOT NULL,
    JOB_PARAMS TEXT,                           -- serialized job parameters
    CMB_RECS_PROCESSED INT,
    CMB_RECS_PER_SEC DOUBLE PRECISION,
    TOT_ELAPSED_MILLIS INT,
    IS_WORK_CHUNKS_PURGED BOOLEAN NOT NULL DEFAULT FALSE,
    WORK_CHUNKS_PURGED_TIME TIMESTAMP WITH TIME ZONE,
    ERROR_MSG VARCHAR(500),
    ERROR_COUNT INT NOT NULL DEFAULT 0,
    EST_REMAINING VARCHAR(100),
    CUR_GATED_STEP_ID VARCHAR(100),
    CANCELLED BOOLEAN NOT NULL DEFAULT FALSE,
    REPORT TEXT,
    FAST_TRACKING BOOLEAN,
    TRIGGER_TIME TIMESTAMP WITH TIME ZONE,
    PARAMS_HASH BIGINT,
    CONSTRAINT PK_HFJ_BATCH2_JOB_INST PRIMARY KEY (ID)
);
-- Pollers scan by status to pick up runnable jobs.
CREATE INDEX IF NOT EXISTS IDX_BATCH2JOBS_STAT ON HFJ_BATCH2_JOB_INST (STAT);
CREATE TABLE IF NOT EXISTS HFJ_BATCH2_WORK_CHUNK (
    ID VARCHAR(36) NOT NULL,
    SEQ INT NOT NULL,                          -- chunk order within its step
    JOB_INSTANCE_ID VARCHAR(36) NOT NULL,
    TGT_STEP_ID VARCHAR(100) NOT NULL,
    STAT VARCHAR(20) NOT NULL,
    CREATE_TIME TIMESTAMP WITH TIME ZONE NOT NULL,
    START_TIME TIMESTAMP WITH TIME ZONE,
    END_TIME TIMESTAMP WITH TIME ZONE,
    ERROR_MSG VARCHAR(500),
    ERROR_COUNT INT NOT NULL DEFAULT 0,
    RECORDS_PROCESSED INT,
    CHUNK_DATA TEXT,                           -- serialized chunk payload
    WARNING_MSG VARCHAR(500),
    CONSTRAINT PK_HFJ_BATCH2_WORK_CHUNK PRIMARY KEY (ID),
    CONSTRAINT FK_BATCH2WC_JOB FOREIGN KEY (JOB_INSTANCE_ID)
        REFERENCES HFJ_BATCH2_JOB_INST (ID)
);
-- Chunk pickup: find pending chunks for a given job.
CREATE INDEX IF NOT EXISTS IDX_BATCH2WC_JOB_STAT
    ON HFJ_BATCH2_WORK_CHUNK (JOB_INSTANCE_ID, STAT);
-- ---------------------------------------------------------------------------
-- CONDITIONAL-OPERATION SEARCH URL TABLE
-- NOTE(review): the original header said "RESOURCE HISTORY TABLE", which does
-- not match this table's shape — the search URL as PRIMARY KEY suggests it
-- deduplicates conditional create/update URLs (uniqueness guard), as in
-- upstream HAPI. Confirm against the HAPI 7.2.0 entity mapping.
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS HFJ_RES_SEARCH_URL (
    RES_SEARCH_URL VARCHAR(2000) NOT NULL,   -- normalized conditional URL; PK enforces uniqueness
    RES_ID BIGINT NOT NULL,
    CREATED_TIME TIMESTAMP WITH TIME ZONE NOT NULL,
    CONSTRAINT PK_HFJ_RES_SEARCH_URL PRIMARY KEY (RES_SEARCH_URL),
    CONSTRAINT FK_RES_SEARCH_URL_RES FOREIGN KEY (RES_ID)
        REFERENCES HFJ_RESOURCE (RES_ID)
);
-- ---------------------------------------------------------------------------
-- PARTITION TABLE (HAPI multi-tenancy — disabled for BD deployment
-- but schema must exist as HAPI 7.x always references it)
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS HFJ_PARTITION (
    PART_ID INT NOT NULL,
    PART_NAME VARCHAR(200) NOT NULL,
    PART_DESC VARCHAR(200),
    PART_STATUS VARCHAR(20) NOT NULL DEFAULT 'ACTIVE',
    CONSTRAINT PK_HFJ_PARTITION PRIMARY KEY (PART_ID)
);
-- Default partition (required, always present)
-- ON CONFLICT DO NOTHING keeps the seed idempotent across re-runs.
INSERT INTO HFJ_PARTITION (PART_ID, PART_NAME, PART_STATUS)
VALUES (0, 'DEFAULT', 'ACTIVE')
ON CONFLICT DO NOTHING;
-- ---------------------------------------------------------------------------
-- STORED FILES TABLE (HAPI binary storage)
-- Raw binary content referenced from Binary/Attachment elements.
-- ---------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS HFJ_BINARY_STORAGE_BLOB (
    BLOB_ID VARCHAR(200) NOT NULL,
    RESOURCE_ID VARCHAR(100) NOT NULL,
    -- NOTE(review): INT caps the recorded size at ~2 GiB; fine for typical
    -- attachments, but confirm the deployment's max binary size.
    BLOB_SIZE INT,
    CONTENT_TYPE VARCHAR(100) NOT NULL,
    BLOB_DATA BYTEA NOT NULL,
    PUBLISHED_DATE TIMESTAMP WITH TIME ZONE NOT NULL,
    BLOB_HASH VARCHAR(128),
    CONSTRAINT PK_HFJ_BINARY_STORAGE_BLOB PRIMARY KEY (BLOB_ID)
);
-- ---------------------------------------------------------------------------
-- COMMENTS FOR FUTURE MIGRATIONS
-- The multi-line string literals below rely on PostgreSQL's implicit
-- concatenation of adjacent string constants separated by a newline —
-- do not join them onto one line without merging the quotes.
-- ---------------------------------------------------------------------------
COMMENT ON TABLE HFJ_RESOURCE IS
    'Core FHIR resource table. Partition candidate at 10M+ rows. '
    'Suggested: PARTITION BY RANGE (RES_LAST_UPDATED) monthly. '
    'Prerequisites before partitioning: convert PK to composite, '
    'update all FK references. Write V3 migration when threshold reached.';
COMMENT ON TABLE HFJ_SPIDX_TOKEN IS
    'Token search index. Highest write volume table — one row per coded '
    'element per resource. Partition candidate at 50M+ rows. '
    'Suggested: PARTITION BY HASH (RES_TYPE) with 8 partitions.';
COMMENT ON TABLE HFJ_RES_VER IS
    'Resource version content. Partition candidate at 10M+ rows. '
    'Suggested: PARTITION BY RANGE (RES_LAST_UPDATED) monthly.';

View File

@@ -0,0 +1,182 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
logback-spring.xml
Structured JSON logging for ELK ingestion.
Uses logstash-logback-encoder for JSON output.
Two appenders:
CONSOLE_JSON — structured JSON to stdout (Docker captures this)
FILE_JSON — structured JSON to /app/logs/ (Filebeat pickup)
Both appenders produce identical JSON format.
Filebeat ships /app/logs/ to Logstash/Elasticsearch.
Docker logs ship CONSOLE_JSON to local Docker log driver.
FIELD REFERENCE (all log entries contain):
@timestamp — ISO8601 UTC
level — TRACE/DEBUG/INFO/WARN/ERROR
logger — logger name (class)
thread — thread name
message — log message
application — bd-fhir-hapi
environment — prod
ADDITIONAL FIELDS (where present):
requestId — per-request correlation UUID (from KeycloakJwtInterceptor)
clientId — Keycloak client_id
sendingFacility — facility from token
resourceType — FHIR resource type
outcome — ACCEPTED/REJECTED
rejectionCode — rejection code (for REJECTED events)
durationMs — processing duration in milliseconds
-->
<configuration scan="false">
<!-- scan=false: the config is fixed for the life of the JVM; changes take
effect on container restart, not via hot reload. -->
<!-- Spring Boot provides these properties via SpringBoot integration -->
<!-- Requires the logback-spring.xml naming so Spring's extensions apply. -->
<springProperty scope="context" name="appName"
source="spring.application.name" defaultValue="bd-fhir-hapi"/>
<springProperty scope="context" name="activeProfile"
source="spring.profiles.active" defaultValue="prod"/>
<!-- =========================================================
JSON ENCODER — shared configuration
========================================================= -->
<!-- Structured JSON to stdout; captured by the Docker log driver. -->
<appender name="CONSOLE_JSON"
class="ch.qos.logback.core.ConsoleAppender">
<encoder class="net.logstash.logback.encoder.LogstashEncoder">
<!-- Standard fields -->
<timestampPattern>yyyy-MM-dd'T'HH:mm:ss.SSSZZ</timestampPattern>
<timeZone>UTC</timeZone>
<!-- Custom static fields on every log entry -->
<customFields>{"application":"${appName}","environment":"${activeProfile}"}</customFields>
<!-- Include MDC fields — KeycloakJwtInterceptor sets these per-request.
NOTE(review): the file header also documents outcome, rejectionCode
and durationMs fields, but they are not listed here — presumably they
are emitted as structured arguments rather than MDC; confirm, or add
matching includeMdcKeyName entries. -->
<includeMdcKeyName>requestId</includeMdcKeyName>
<includeMdcKeyName>clientId</includeMdcKeyName>
<includeMdcKeyName>sendingFacility</includeMdcKeyName>
<includeMdcKeyName>resourceType</includeMdcKeyName>
<includeMdcKeyName>requestIp</includeMdcKeyName>
<!-- Shorten logger name for readability -->
<shortenedLoggerNameLength>40</shortenedLoggerNameLength>
<!-- Do not include caller data (file/line number) — expensive -->
<includeCallerData>false</includeCallerData>
<!-- Include exception as structured field, not embedded in message -->
<throwableConverter class="net.logstash.logback.stacktrace.ShortenedThrowableConverter">
<maxDepthPerCause>10</maxDepthPerCause>
<maxLength>2048</maxLength>
<rootCauseFirst>true</rootCauseFirst>
</throwableConverter>
</encoder>
<!-- Appender-level threshold: drops DEBUG/TRACE on this appender even when
a logger's level is set lower. NOTE(review): the original comment said
"below WARN" but the configured threshold is INFO — confirm which level
is intended. Applies to ALL loggers routed here, not just HAPI
internals; per-logger overrides live in application.yaml. -->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
</appender>
<!-- Structured JSON to /app/logs/ for Filebeat pickup. The encoder settings
below intentionally mirror CONSOLE_JSON — keep the two in sync so both
sinks carry the same JSON shape.
NOTE(review): unlike CONSOLE_JSON, this appender has no ThresholdFilter,
so DEBUG/TRACE from a logger set below INFO would reach the file but not
the console — confirm that asymmetry is intended. -->
<appender name="FILE_JSON"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>/app/logs/bd-fhir-hapi.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!-- Daily rotation + size cap: roll at midnight or 100MB, keep 7 days,
gzip rolled files, never exceed 1GB total on disk. -->
<fileNamePattern>/app/logs/bd-fhir-hapi.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
<maxFileSize>100MB</maxFileSize>
<maxHistory>7</maxHistory>
<totalSizeCap>1GB</totalSizeCap>
</rollingPolicy>
<encoder class="net.logstash.logback.encoder.LogstashEncoder">
<timestampPattern>yyyy-MM-dd'T'HH:mm:ss.SSSZZ</timestampPattern>
<timeZone>UTC</timeZone>
<customFields>{"application":"${appName}","environment":"${activeProfile}"}</customFields>
<includeMdcKeyName>requestId</includeMdcKeyName>
<includeMdcKeyName>clientId</includeMdcKeyName>
<includeMdcKeyName>sendingFacility</includeMdcKeyName>
<includeMdcKeyName>resourceType</includeMdcKeyName>
<includeMdcKeyName>requestIp</includeMdcKeyName>
<shortenedLoggerNameLength>40</shortenedLoggerNameLength>
<includeCallerData>false</includeCallerData>
<throwableConverter class="net.logstash.logback.stacktrace.ShortenedThrowableConverter">
<maxDepthPerCause>10</maxDepthPerCause>
<maxLength>2048</maxLength>
<rootCauseFirst>true</rootCauseFirst>
</throwableConverter>
</encoder>
</appender>
<!-- Async wrappers — log appends must not block FHIR request threads -->
<appender name="ASYNC_CONSOLE"
class="ch.qos.logback.classic.AsyncAppender">
<appender-ref ref="CONSOLE_JSON"/>
<!-- Queue capacity: 1000 log events before blocking. If the logging
pipeline is slower than log production, this queue absorbs bursts. -->
<queueSize>1000</queueSize>
<!-- discardingThreshold=0: never discard log events even when queue
is 80% full. Default is to discard INFO/DEBUG at 80% — unacceptable
for audit-adjacent logs.
NOTE(review): with discarding disabled and logback's default
neverBlock=false, producer threads WILL block if the queue fills —
a deliberate completeness-over-latency tradeoff; confirm acceptable
under sustained appender backpressure. -->
<discardingThreshold>0</discardingThreshold>
<includeCallerData>false</includeCallerData>
</appender>
<!-- Same async settings as ASYNC_CONSOLE, wrapping the file sink. -->
<appender name="ASYNC_FILE"
class="ch.qos.logback.classic.AsyncAppender">
<appender-ref ref="FILE_JSON"/>
<queueSize>1000</queueSize>
<discardingThreshold>0</discardingThreshold>
<includeCallerData>false</includeCallerData>
</appender>
<!-- =========================================================
LOGGER CONFIGURATION
Levels here are defaults — overridden by application.yaml
logging.level.* properties which Logback reads at startup.
========================================================= -->
<!-- Our application code — INFO and above. additivity=false keeps events
out of the root logger so each entry is emitted exactly once. -->
<logger name="bd.gov.dghs" level="INFO" additivity="false">
<appender-ref ref="ASYNC_CONSOLE"/>
<appender-ref ref="ASYNC_FILE"/>
</logger>
<!-- HAPI internals — WARN only. INFO from HAPI is extremely verbose
and contains partial resource content.
FIX: HAPI FHIR's Java package root is "ca.uhn.fhir" (e.g.
ca.uhn.fhir.rest.server), not "ca.uhn.hapi.fhir". The previous name
matched no real logger, so HAPI output was only caught by the WARN
root logger by accident instead of by this explicit rule. -->
<logger name="ca.uhn.fhir" level="WARN" additivity="false">
<appender-ref ref="ASYNC_CONSOLE"/>
<appender-ref ref="ASYNC_FILE"/>
</logger>
<!-- Flyway — INFO to capture migration progress at startup -->
<logger name="org.flywaydb" level="INFO" additivity="false">
<appender-ref ref="ASYNC_CONSOLE"/>
<appender-ref ref="ASYNC_FILE"/>
</logger>
<!-- Hibernate SQL — WARN. Never DEBUG in production: SQL statements can
embed patient-identifying parameter context. -->
<logger name="org.hibernate.SQL" level="WARN" additivity="false">
<appender-ref ref="ASYNC_CONSOLE"/>
<appender-ref ref="ASYNC_FILE"/>
</logger>
<!-- HikariCP pool events — WARN -->
<logger name="com.zaxxer.hikari" level="WARN" additivity="false">
<appender-ref ref="ASYNC_CONSOLE"/>
<appender-ref ref="ASYNC_FILE"/>
</logger>
<!-- Spring framework — WARN -->
<logger name="org.springframework" level="WARN" additivity="false">
<appender-ref ref="ASYNC_CONSOLE"/>
<appender-ref ref="ASYNC_FILE"/>
</logger>
<!-- Root logger — WARN for everything else. Catches any package without
an explicit logger above (and anything routed here via additivity). -->
<root level="WARN">
<appender-ref ref="ASYNC_CONSOLE"/>
<appender-ref ref="ASYNC_FILE"/>
</root>
</configuration>