model_1
v0.1.0
Mock model for testing the V2 model interface. Simulates training with 95% accuracy and generates sample inference results. A good reference implementation for building new models.
$ openuba install model_1
Runtime: python-base
License: Apache-2.0
Tags: mock, testing, reference, v2-interface
Parameters
| Name | Type | Default | Description |
|---|---|---|---|
| sensitivity | float | 0.85 | Base risk score sensitivity |
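
The sensitivity parameter is exposed to the model through the execution context (see `ctx.params` in MODEL.py below). A minimal sketch, assuming the runner populates `ctx.params` with values from model.yaml or user overrides, which is how a model could read it:

```python
# Sketch: reading the sensitivity parameter inside a model method.
# Assumes the runner places parameter values in ctx.params as a plain dict;
# falls back to the documented default of 0.85 if the key is absent.
def base_sensitivity(ctx) -> float:
    return float(ctx.params.get("sensitivity", 0.85))
```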
model.yaml
name: model_1
version: 0.1.0
runtime: python-base
description: Mock model for testing V2 interface
parameters:
  sensitivity:
    type: float
    default: 0.85
    description: Base risk score sensitivity
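
The manifest is plain YAML, so parameter defaults can be inspected with any YAML parser. A minimal sketch, assuming PyYAML is installed and model.yaml is in the working directory (the OpenUBA runner itself may load the manifest differently):

```python
# Sketch: collecting parameter defaults from model.yaml with PyYAML.
# Illustrative only; not the runner's own loading code.
import yaml

with open("model.yaml") as f:
    manifest = yaml.safe_load(f)

defaults = {
    name: spec.get("default")
    for name, spec in manifest.get("parameters", {}).items()
}
print(defaults)  # {'sensitivity': 0.85}
```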
MODEL.py
from typing import Any, Dict, Optional
import logging

logger = logging.getLogger(__name__)

class ModelContext:
    '''
    Mock context for local execution/testing if not provided by runner
    '''
    def __init__(self, df=None, params: Optional[Dict[str, Any]] = None):
        self.df = df
        self.params = params or {}
        self.logger = logger

class Model:
    def __init__(self):
        self.model_state = {}

    def train(self, ctx: Any) -> Dict[str, Any]:
        '''
        Train the model
        '''
        ctx.logger.info("model_1 v2 training...")
        # Simulate training logic
        self.model_state["status"] = "trained"
        self.model_state["accuracy"] = 0.95

        return {
            "status": "success",
            "metrics": {
                "accuracy": 0.95,
                "loss": 0.05
            },
            "artifacts": ["model.pt"]  # Mock artifact list
        }

    def infer(self, ctx: Any, loaded_model: Any = None) -> Any:
        '''
        Run inference
        '''
        import pandas as pd  # Imported lazily so training does not require pandas
        ctx.logger.info("model_1 v2 inference...")
        # Simulate inference logic: emit five sample users with rising risk scores
        results = []
        for i in range(5):
            results.append({
                "user_id": f"user_{i}",
                "risk_score": 0.85 + (i * 0.01),
                "reason": "simulated_anomaly"
            })

        return pd.DataFrame(results)
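
Because ModelContext doubles as a mock context, the model can be exercised locally without the OpenUBA runner. A minimal sketch, assuming MODEL.py is importable as a module named MODEL and pandas is installed:

```python
# Sketch: running model_1 locally with the mock context.
# The real runner supplies its own context; this only mirrors the shapes above.
import logging
from MODEL import Model, ModelContext

logging.basicConfig(level=logging.INFO)

ctx = ModelContext(df=None, params={"sensitivity": 0.85})
model = Model()

train_result = model.train(ctx)   # {'status': 'success', 'metrics': {...}, 'artifacts': [...]}
predictions = model.infer(ctx)    # DataFrame with user_id, risk_score, reason columns
print(train_result["metrics"])
print(predictions.head())
```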