Coverage for backend/django/diagnostics/rules/engine.py: 83%
106 statements
« prev ^ index » next coverage.py v7.10.7, created at 2026-02-11 21:43 +0000
1from __future__ import annotations
3import json
4from pathlib import Path
5from typing import Literal, Optional, TypedDict, cast
6from django.conf import settings
8from pydantic import BaseModel, JsonValue
10import zen
12from core.auxiliary.models.PropertyInfo import PropertyInfo
13from diagnostics.schemas import DiagnosticsFinding, Severity
14from flowsheetInternals.unitops.models.SimulationObject import SimulationObject
16# Improved typing for rule evaluation context
17#
18# RuleContext is the dictionary passed to the ZEN rules engine for evaluation.
19# It contains information about the property being checked:
20#
21# Keys:
22# - "objectType": str - Lowercased unit operation type (e.g., "pump", "heater")
23# - "propertyKey": str - Lowercased property key (e.g., "efficiency_pump")
24# - "value": float - The numeric property value being evaluated
25# - "displayName": str | None - Human-readable property name (e.g., "Pump Efficiency")
26# - "componentName": str | None - Instance name (e.g., "Pump-101")
27# - "propertyId": int - Database ID of the property
28#
29# Rules in the JDM file can reference these keys to make decisions, e.g.:
30# "if objectType == 'pump' and propertyKey == 'efficiency_pump' and value > 1.0"
# Given the above, we use a TypedDict to enforce the keys and value types of the context dictionary.
class RuleContext(TypedDict):
    """Input payload handed to the ZEN rules engine for one property check.

    Rules in the JDM file reference these keys to make decisions, e.g.
    "if objectType == 'pump' and propertyKey == 'efficiency_pump' and value > 1.0".
    """

    objectType: str            # unit-operation type, lowercased (e.g. "pump", "heater")
    propertyKey: str           # property key, lowercased (e.g. "efficiency_pump")
    value: float               # numeric property value under evaluation
    displayName: str | None    # human-readable property name (e.g. "Pump Efficiency")
    componentName: str | None  # instance name (e.g. "Pump-101")
    propertyId: int            # database ID of the property
# The ZEN engine returns finding rows as plain dicts; _normalise_findings()
# validates them into ZenFinding objects. Each raw row contains keys like:
# - "id": str - Rule identifier
# - "severity": str - Severity level
# - "title": str - Finding title
# - "description": str - Description template
# - "limit": float | None - Threshold value (if applicable)
class ZenFinding(BaseModel):
    """One finding row emitted by the ZEN decision engine.

    Validating rows with pydantic here means the rest of the module never
    has to pass around untyped dicts parsed from JSON.
    """

    id: str = ""                      # rule identifier
    ruleReference: str | None = None  # optional reference back to the source rule
    severity: Severity = "info"       # severity level reported by the rule
    title: str = ""                   # short finding title
    description: str = ""             # description template (may contain placeholders)
    limit: float | None = None        # threshold that was exceeded, if applicable
class RuleFinding(DiagnosticsFinding):
    """A single finding produced by rule evaluation.

    Describes what was wrong, where it was found, and how severe it is —
    this is what gets returned when a rule matches.
    """

    value: Optional[float] = None  # the actual value that triggered the rule
    limit: Optional[float] = None  # the limit/threshold that was exceeded

    def to_dict(self) -> dict[str, JsonValue]:
        """Serialize for the frontend as JSON-compatible data, dropping None values."""
        return self.model_dump(mode="json", exclude_none=True)
# =============================================================================
# Module-level cache for the ZEN rules engine
# =============================================================================
# Cached at module level so the rules file is not reloaded and recompiled on
# every request. The cache is invalidated when the file's mtime changes.

_ZEN_ENGINE: zen.ZenEngine | None = None  # The ZEN engine instance (reusable)
_COMPILED_DECISION: object | None = None  # Compiled decision graph from rules
_JDM_MODEL_CACHE: dict[str, JsonValue] | None = None  # Raw JSON content of rules file
_JDM_MODEL_MTIME: float | None = None  # File modification time when we loaded it
def _rules_path() -> Path:
    """Return the path of the rules JSON file configured in Django settings.

    Expects ``DIAGNOSTICS_RULES_PATH`` to be set in settings.py, pointing
    at the .json file containing the GoRules decision graph.

    Raises:
        RuntimeError: if the setting is not configured.
    """
    configured = getattr(settings, "DIAGNOSTICS_RULES_PATH", None)
    if configured is None:
        raise RuntimeError("DIAGNOSTICS_RULES_PATH is not configured")
    return Path(configured)
def load_decision_model() -> dict[str, JsonValue]:
    """Load the JDM (JSON Decision Model) from disk, with mtime-based caching.

    Returns a dict like::

        {
            "nodes": [...],     # decision nodes in the graph
            "edges": [...],     # connections between nodes
            "metadata": {...}   # version info, etc.
        }

    The file's modification time is used to detect changes: while the file
    on disk is unchanged, the cached model is returned.
    """
    global _JDM_MODEL_CACHE, _JDM_MODEL_MTIME

    path = _rules_path()
    current_mtime = path.stat().st_mtime

    # Serve from cache while the file has not been modified.
    if _JDM_MODEL_CACHE is not None and _JDM_MODEL_MTIME == current_mtime:
        return _JDM_MODEL_CACHE

    # Cache miss (or the file changed): reload from disk.
    with path.open("r", encoding="utf-8") as handle:
        _JDM_MODEL_CACHE = json.load(handle)
    _JDM_MODEL_MTIME = current_mtime
    return _JDM_MODEL_CACHE
def _get_decision() -> object:
    """Get or create the compiled ZEN decision graph.

    The decision graph is what actually evaluates our rules. It is compiled
    from the JSON model and cached until the rules file changes.

    Returns:
        The compiled decision object from ``zen.ZenEngine.create_decision``.
    """
    global _ZEN_ENGINE, _COMPILED_DECISION

    # BUG FIX: snapshot the mtime BEFORE loading. load_decision_model()
    # updates _JDM_MODEL_MTIME when the file changed, so the previous code
    # (which compared a fresh stat() against the just-updated mtime) always
    # matched and the stale compiled decision was never rebuilt after a
    # rules-file change.
    previous_mtime = _JDM_MODEL_MTIME
    content = json.dumps(load_decision_model())

    # Reuse the compiled decision only if the model did not change.
    if _COMPILED_DECISION is not None and previous_mtime == _JDM_MODEL_MTIME:
        return _COMPILED_DECISION

    # Create the ZEN engine lazily, once.
    if _ZEN_ENGINE is None:
        _ZEN_ENGINE = zen.ZenEngine()

    # Compile the (new or first-time) rules into an executable decision graph.
    _COMPILED_DECISION = _ZEN_ENGINE.create_decision(content)
    return _COMPILED_DECISION
def build_rule_context(
    obj: SimulationObject,
    prop: PropertyInfo,
    value: float,
) -> RuleContext:
    """Assemble the context dictionary passed to rule evaluation.

    This is the INPUT to the rules engine; rules may reference any of the
    keys, e.g. "if objectType == 'pump' and propertyKey == 'efficiency_pump'
    and value > 1.0".

    Returns a dict like::

        {
            "objectType": "pump",              # type of unit operation
            "propertyKey": "efficiency_pump",  # property being checked
            "value": 0.85,                     # current value
            "displayName": "Pump Efficiency",  # human-readable name
            "componentName": "Pump-101",       # instance name
            "propertyId": 42                   # database ID for linking
        }
    """
    context = {
        "objectType": (obj.objectType or "").lower(),
        "propertyKey": (prop.key or "").lower(),
        "value": value,
        "displayName": prop.displayName,
        "componentName": obj.componentName,
        # Always an int for saved PropertyInfo instances.
        "propertyId": prop.id,
    }
    return cast(RuleContext, context)
200def _format_description(
201 template: str, ctx: RuleContext, finding: ZenFinding
202) -> str:
203 """
204 Format a description template with actual values.
206 Templates in the rules can use placeholders like:
207 "{display_name} exceeds maximum of {limit} (current: {value})"
209 We fill these in with the actual values from context.
210 Returns the raw template if formatting fails.
211 """
212 try:
213 return template.format(
214 display_name=ctx.get("displayName") or ctx.get("propertyKey"),
215 value=ctx.get("value"),
216 limit=finding.limit,
217 )
218 except Exception:
219 return template
def _normalise_findings(raw_result: JsonValue | None) -> list[ZenFinding]:
    """Normalise ZEN output into typed ``ZenFinding`` objects.

    ZEN can return findings in different shapes:
      - ``{"findings": [ {...}, {...} ]}`` -> findings wrapped in a dict
      - ``[ {...}, {...} ]``               -> findings as a direct list
      - ``None``                           -> no findings at all
    Any other shape yields an empty list.
    """
    if raw_result is None:
        return []  # nothing to process

    # Unwrap to a plain list of candidate rows.
    if isinstance(raw_result, dict):
        candidates = raw_result.get("findings")
        if not isinstance(candidates, list):
            # No "findings" key, or it's not a list - nothing useful here.
            return []
    elif isinstance(raw_result, list):
        candidates = raw_result
    else:
        # Unexpected shape - return empty list to be safe.
        return []

    # Validate each dict row; pydantic coerces types (e.g. limit -> float | None).
    # Non-dict entries are skipped.
    return [
        ZenFinding.model_validate(row)
        for row in candidates
        if isinstance(row, dict)
    ]
def evaluate_rules(context: RuleContext) -> list[RuleFinding]:
    """Evaluate the configured rules against a single context.

    Args:
        context: RuleContext from build_rule_context() containing property info.

    Returns:
        A ``RuleFinding`` for every rule that matched.
    """
    # Compiled decision graph (loaded/cached as needed).
    decision = _get_decision()

    # ZEN returns something like:
    # {"result": {"findings": [...]}, "performance": {...}}
    raw = decision.evaluate(context)  # type: ignore[union-attr]
    payload = raw.get("result") if isinstance(raw, dict) else raw

    findings: list[RuleFinding] = []
    for zen_finding in _normalise_findings(payload):
        # Fill the description template with the actual values.
        description = _format_description(zen_finding.description, context, zen_finding)

        findings.append(
            RuleFinding(
                id=zen_finding.id,
                ruleReference=zen_finding.ruleReference or zen_finding.id,
                severity=zen_finding.severity,
                title=zen_finding.title,
                description=description,
                # Location info comes from the context, not the rule output.
                componentName=context.get("componentName"),
                propertyKey=context.get("propertyKey"),
                propertyId=context.get("propertyId"),
                value=context["value"],   # always a float from build_rule_context
                limit=zen_finding.limit,  # already float | None via ZenFinding
            )
        )
    return findings
def evaluate_property_rules(obj: SimulationObject) -> list[RuleFinding]:
    """Evaluate all rules against all properties of a simulation object.

    This is the main entry point - call this with a unit operation and it
    will check all its properties against all configured rules.

    Args:
        obj: A SimulationObject (pump, heater, etc.) with properties.

    Returns:
        List of RuleFinding objects, e.g.::

            [
                {
                    "id": "rule-001",
                    "severity": "warning",
                    "title": "Efficiency exceeds maximum",
                    "description": "Pump Efficiency exceeds maximum of 1.0 (current: 1.5)",
                    "componentName": "Pump-101",
                    "propertyKey": "efficiency_pump",
                    "value": 1.5,
                    "limit": 1.0
                },
                ...
            ]
    """
    # Get the property set - bail early if none.
    prop_set = getattr(obj, "properties", None)
    if not prop_set:
        return []

    findings: list[RuleFinding] = []

    # Loop through all properties on this object; each property can have
    # multiple values (e.g., different scenarios).
    for prop in prop_set.ContainedProperties.all():
        for pv in prop.values.all():
            if pv.value is None:
                continue  # skip missing values
            # Robustness: a single non-numeric value (e.g. a text property)
            # must not abort the whole diagnostic run - skip it instead of
            # letting float() raise.
            try:
                value = float(pv.value)
            except (TypeError, ValueError):
                continue

            # Build the context dict and evaluate rules.
            context = build_rule_context(obj, prop, value)
            findings.extend(evaluate_rules(context))

    return findings