{
  "project": "Dynexus",
  "version": "0.1.0",
  "timestamp": "2025-12-28T20:31:53.081059",
  "modules": {
    "dynexus/utils/logging.py": {
      "path": "dynexus/utils/logging.py",
      "module_docstring": "Structured logging.\n\nProvides research-grade logging with different verbosity levels,\nstructured output, and support for logging steps.",
      "imports": [
        {
          "module": "json",
          "alias": null
        },
        {
          "module": "logging",
          "alias": null
        },
        {
          "module": "sys",
          "alias": null
        },
        {
          "module": "datetime",
          "name": "datetime",
          "alias": null,
          "level": 0
        },
        {
          "module": "datetime",
          "name": "timezone",
          "alias": null,
          "level": 0
        },
        {
          "module": "enum",
          "name": "IntEnum",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "cast",
          "alias": null,
          "level": 0
        }
      ],
      "classes": [
        {
          "name": "LogLevel",
          "bases": [
            "IntEnum"
          ],
          "docstring": "Extended logging levels.\n\nExtends standard logging levels with domain-specific for operations.",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "CausalLogger",
          "bases": [
            "logging.Logger"
          ],
          "docstring": "Extended logger with causal reasoning methods.\n\nAdds convenience methods for logging causal operations,\ninterventions, and reasoning steps with structured context.",
          "methods": [
            {
              "name": "trace",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "msg",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "args",
                  "type": "*args",
                  "annotation": "Any"
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Log at TRACE level."
            },
            {
              "name": "causal",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "msg",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "args",
                  "type": "*args",
                  "annotation": "Any"
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Log at CAUSAL level (causal operations)."
            },
            {
              "name": "reasoning",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "msg",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "args",
                  "type": "*args",
                  "annotation": "Any"
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Log at REASONING level (reasoning steps)."
            },
            {
              "name": "log_intervention",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "variable",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                },
                {
                  "name": "old_value",
                  "type": "normal",
                  "annotation": "Any | None",
                  "default": null
                },
                {
                  "name": "reason",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                },
                {
                  "name": "context",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Log an intervention (do-operation) with structured data.\n\nParameters\n----------\nvariable : str\n    Name or ID of the variable being intervened on.\nvalue : Any\n    New value after intervention.\nold_value : Any, optional\n    Previous value before intervention.\nreason : str, optional\n    Reason for the intervention.\n**context\n    Additional context data to log."
            },
            {
              "name": "log_counterfactual",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "scenario",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "actual",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                },
                {
                  "name": "counterfactual",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                },
                {
                  "name": "probability",
                  "type": "normal",
                  "annotation": "float | None",
                  "default": null
                },
                {
                  "name": "context",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Log a counterfactual reasoning step.\n\nParameters\n----------\nscenario : str\n    Description of the counterfactual scenario.\nactual : Any\n    Actual observed value.\ncounterfactual : Any\n    Counterfactual value.\nprobability : float, optional\n    Probability or confidence of the counterfactual.\n**context\n    Additional context data."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "JSONFormatter",
          "bases": [
            "logging.Formatter"
          ],
          "docstring": "Formatter that outputs JSON for structured logging.\n\nUseful for log aggregation and analysis in research workflows.",
          "methods": [
            {
              "name": "format",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "record",
                  "type": "normal",
                  "annotation": "logging.LogRecord",
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Format the log record as JSON.\n\nParameters\n----------\nrecord : logging.LogRecord\n    Log record to format.\n\nReturns\n-------\nstr\n    JSON string representation of the log record."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        }
      ],
      "functions": [
        {
          "name": "setup_logging",
          "args": [
            {
              "name": "level",
              "type": "normal",
              "annotation": "str | int",
              "default": null
            },
            {
              "name": "json_output",
              "type": "normal",
              "annotation": "bool",
              "default": null
            },
            {
              "name": "log_file",
              "type": "normal",
              "annotation": "str | None",
              "default": null
            },
            {
              "name": "module_levels",
              "type": "normal",
              "annotation": "dict[str, str | int] | None",
              "default": null
            }
          ],
          "returns": "None",
          "decorators": [],
          "is_async": false,
          "docstring": "Configure logging.\n\nParameters\n----------\nlevel : str or int, default='INFO'\n    Default logging level. Can be string name or integer value.\njson_output : bool, default=False\n    If True, output logs as JSON. If False, use human-readable format.\nlog_file : str, optional\n    If provided, also log to this file.\nmodule_levels : dict, optional\n    Specific logging levels for modules.\n    Example: {'dynexus.core': 'DEBUG', 'dynexus.solver': 'WARNING'}\n\nExamples\n--------\n>>> from dynexus import get_logger, setup_logging\n>>> setup_logging(level='CAUSAL', json_output=True)\n>>> logger = get_logger(__name__)\n>>> logger.causal(\"Starting causal inference\")"
        },
        {
          "name": "get_logger",
          "args": [
            {
              "name": "name",
              "type": "normal",
              "annotation": "str",
              "default": null
            }
          ],
          "returns": "CausalLogger",
          "decorators": [],
          "is_async": false,
          "docstring": "Get a logger instance with causal extensions.\n\nParameters\n----------\nname : str\n    Logger name, typically __name__.\n\nReturns\n-------\nCausalLogger\n    Logger instance with reasoning methods."
        }
      ],
      "global_vars": []
    },
    "dynexus/utils/config.py": {
      "path": "dynexus/utils/config.py",
      "module_docstring": "Configuration management.\n\nProvides hierarchical configuration with environment variable support,\ntype validation, and schema definition for research reproducibility.",
      "imports": [
        {
          "module": "os",
          "alias": null
        },
        {
          "module": "dataclasses",
          "name": "dataclass",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "field",
          "alias": null,
          "level": 0
        },
        {
          "module": "pathlib",
          "name": "Path",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "yaml",
          "alias": null
        }
      ],
      "classes": [
        {
          "name": "SolverConfig",
          "bases": [],
          "docstring": "Configuration for mathematical solvers.\n\nAttributes\n----------\node_tol : float\n    Tolerance for ODE solvers.\nsde_steps : int\n    Number of steps for SDE integration.\nmax_iterations : int\n    Maximum iterations for optimization solvers.\nparallel : bool\n    Enable parallel computation.\nworkers : int\n    Number of parallel workers.",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "RuntimeConfig",
          "bases": [],
          "docstring": "Configuration for causal runtime.\n\nAttributes\n----------\ncheck_contracts : bool\n    Enable contract checking at runtime.\nvalidate_state : bool\n    Validate state consistency each step.\nmax_history : int\n    Maximum number of historical states to keep.\nrealtime_factor : float\n    Speed factor for simulation (1.0 = realtime).\ncausal_consistency : bool\n    Enable causal consistency checking.",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "LoggingConfig",
          "bases": [],
          "docstring": "Configuration for logging.\n\nAttributes\n----------\nlevel : str\n    Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL).\njson_output : bool\n    Output logs as JSON.\nlog_file : Optional[str]\n    Path to log file.\nmodule_levels : dict[str, str]\n    Module-specific logging levels.",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "Config",
          "bases": [],
          "docstring": "Hierarchical configuration manager.\n\nSupports loading from multiple sources with precedence:\n1. Environment variables (DYNEXUS_*)\n2. Config file (YAML)\n3. Default values\n\nConfiguration is validated and type-safe.\n\nExamples\n--------\n>>> from dynexus import Config\n>>> config = Config()\n>>> config.solver.ode_tol\n1e-06\n\n>>> config.update_from_file('config.yaml')\n>>> config.update_from_env()",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "config_file",
                  "type": "normal",
                  "annotation": "str | Path | None",
                  "default": null
                },
                {
                  "name": "env_prefix",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize configuration.\n\nParameters\n----------\nconfig_file : str or Path, optional\n    Path to YAML configuration file.\nenv_prefix : str, default='DYNEXUS'\n    Prefix for environment variables."
            },
            {
              "name": "update_from_file",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "config_file",
                  "type": "normal",
                  "annotation": "str | Path",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Update configuration from YAML file.\n\nParameters\n----------\nconfig_file : str or Path\n    Path to YAML configuration file.\n\nRaises\n------\nFileNotFoundError\n    If config_file doesn't exist.\nImportError\n    If pyyaml is not installed.\nyaml.YAMLError\n    If YAML is malformed."
            },
            {
              "name": "update_from_env",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Update configuration from environment variables.\n\nEnvironment variables should be prefixed with env_prefix\nand use double underscore for nesting:\n\nDYNEXUS_SOLVER__ODE_TOL=1e-8\nDYNEXUS_LOGGING__LEVEL=DEBUG"
            },
            {
              "name": "_update_from_dict",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "data",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Update configuration from dictionary.\n\nParameters\n----------\ndata : dict\n    Configuration data."
            },
            {
              "name": "_update_dataclass",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "dataclass_instance",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                },
                {
                  "name": "updates",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Update a dataclass instance with dictionary values.\n\nParameters\n----------\ndataclass_instance\n    Dataclass instance to update.\nupdates : dict\n    Dictionary of updates."
            },
            {
              "name": "_convert_env_value",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "Any",
              "decorators": [],
              "is_async": false,
              "docstring": "Convert environment variable string to appropriate type.\n\nParameters\n----------\nvalue : str\n    Environment variable value.\n\nReturns\n-------\nAny\n    Converted value."
            },
            {
              "name": "get",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "key",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "default",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "Any",
              "decorators": [],
              "is_async": false,
              "docstring": "Get configuration value by dot notation.\n\nParameters\n----------\nkey : str\n    Dot notation key (e.g., 'solver.ode_tol').\ndefault : Any, optional\n    Default value if key not found.\n\nReturns\n-------\nAny\n    Configuration value.\n\nExamples\n--------\n>>> from dynexus import Config\n>>> config = Config()\n>>> config.get('solver.ode_tol')\n1e-06\n\n>>> config.get('custom.setting', 'default')\n'default'"
            },
            {
              "name": "to_dict",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Convert configuration to dictionary.\n\nReturns\n-------\ndict\n    Dictionary representation of configuration."
            },
            {
              "name": "save",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "config_file",
                  "type": "normal",
                  "annotation": "str | Path",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Save configuration to YAML file.\n\nParameters\n----------\nconfig_file : str or Path\n    Path to save configuration.\n\nRaises\n------\nImportError\n    If pyyaml is not installed."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        }
      ],
      "functions": [],
      "global_vars": []
    },
    "dynexus/utils/id.py": {
      "path": "dynexus/utils/id.py",
      "module_docstring": "Unique identifier generation.\n\nProvides thread-safe, distributed-friendly ID generation for all objects.\nEnsures every Variable, Mechanism, Process, etc., has a globally unique identifier.",
      "imports": [
        {
          "module": "threading",
          "alias": null
        },
        {
          "module": "time",
          "alias": null
        },
        {
          "module": "uuid",
          "alias": null
        }
      ],
      "classes": [
        {
          "name": "IDGenerator",
          "bases": [],
          "docstring": "Thread-safe ID generator with multiple generation strategies.\n\nCombines timestamp, counter, and random components to ensure uniqueness\neven in distributed environments with clock skew.\n\nAttributes\n----------\nprefix : str\n    Prefix for all generated IDs (e.g., 'var', 'mech', 'proc').\ncounter : int\n    Monotonically increasing counter for sequence generation.\nlock : threading.Lock\n    Thread lock for safe concurrent access.\n\nExamples\n--------\n>>> from dynexus import IDGenerator\n>>> gen = IDGenerator('var')\n>>> gen.generate()  # doctest: +SKIP\n'var_1765249497673_1_69f161f2'\n\n>>> gen.generate(seed='custom')  # doctest: +SKIP\n'var_1765249516626_2_1736c2fe'",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "prefix",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize ID generator with a prefix.\n\nParameters\n----------\nprefix : str, default='obj'\n    Prefix for all generated IDs. Should be descriptive\n    (e.g., 'variable', 'mechanism', 'process')."
            },
            {
              "name": "generate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Generate a unique identifier.\n\nFormat: {prefix}_{timestamp}_{counter}_{unique}\n\nParameters\n----------\nseed : str, optional\n    Custom seed for the unique component. If None, a random\n    UUID hex string is used.\n\nReturns\n-------\nstr\n    Unique identifier string.\n\nNotes\n-----\nThread-safe and monotonic within a single generator instance.\nFor distributed uniqueness, combine with machine/process identifier\nin the prefix or seed."
            },
            {
              "name": "reset",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Reset the internal counter (mainly for testing)."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        }
      ],
      "functions": [
        {
          "name": "generate_id",
          "args": [
            {
              "name": "obj_type",
              "type": "normal",
              "annotation": "str",
              "default": null
            },
            {
              "name": "seed",
              "type": "normal",
              "annotation": "str | None",
              "default": null
            }
          ],
          "returns": "str",
          "decorators": [],
          "is_async": false,
          "docstring": "Generate a unique ID for an object type.\n\nConvenience function using pre-configured generators.\n\nParameters\n----------\nobj_type : str, default='object'\n    Type of object: 'variable', 'mechanism', 'process', 'contract',\n    or 'object' for generic.\nseed : str, optional\n    Custom seed for deterministic generation.\n\nReturns\n-------\nstr\n    Unique identifier.\n\nRaises\n------\nValueError\n    If obj_type is not recognized.\n\nExamples\n--------\n>>> from dynexus import generate_id\n>>> generate_id('variable')  # doctest: +SKIP\n'variable_1765250016697_1_73c7af78'\n\n>>> generate_id('mechanism', seed='metabolic_ode')  # doctest: +SKIP\n'mechanism_1765250038229_1_41682ee4'"
        }
      ],
      "global_vars": [
        {
          "name": "_VARIABLE_GEN",
          "annotation": null
        },
        {
          "name": "_PROPOSAL_GEN",
          "annotation": null
        },
        {
          "name": "_STATE_GEN",
          "annotation": null
        },
        {
          "name": "_MECHANISM_GEN",
          "annotation": null
        },
        {
          "name": "_PROCESS_GEN",
          "annotation": null
        },
        {
          "name": "_CONTRACT_GEN",
          "annotation": null
        }
      ]
    },
    "dynexus/utils/mixin.py": {
      "path": "dynexus/utils/mixin.py",
      "module_docstring": "Reusable mixin classes.\n\nProvides common functionality through inheritance to avoid code duplication\nand ensure consistent patterns across the codebase.",
      "imports": [
        {
          "module": "json",
          "alias": null
        },
        {
          "module": "abc",
          "name": "abstractmethod",
          "alias": null,
          "level": 0
        },
        {
          "module": "collections.abc",
          "name": "Callable",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "asdict",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "is_dataclass",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "Generic",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "TypeVar",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "cast",
          "alias": null,
          "level": 0
        },
        {
          "module": "traceback",
          "alias": null
        }
      ],
      "classes": [
        {
          "name": "ObservableMixin",
          "bases": [],
          "docstring": "Mixin for observable objects that notify listeners of changes.\n\nImplements the observer pattern for state changes, interventions,\nand other events. Useful for Variables that need to notify\ndependent Mechanisms of value changes.\n\nAttributes\n----------\n_observers : dict[str, list[Callable[..., None]]]\n    Registered observers by event type.\n\nExamples\n--------\n>>> from dynexus import ObservableMixin\n>>> class Variable(ObservableMixin):\n...     def __init__(self):\n...         super().__init__()\n...         self.value = None\n...     def set_value(self, value):\n...         old = self.value\n...         self.value = value\n...         self.notify('value_changed', old=old, new=value)\n>>> var = Variable()\n>>> var.subscribe('value_changed', lambda **kwargs: print(kwargs))\n>>> var.set_value(42)  # prints: {'old': None, 'new': 42}",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize observable mixin."
            },
            {
              "name": "subscribe",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "event",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "callback",
                  "type": "normal",
                  "annotation": "Callable[..., None]",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Subscribe to an event.\n\nParameters\n----------\nevent : str\n    Event type to subscribe to.\ncallback : Callable[..., None]\n    Function to call when event occurs.\n    Signature: callback(**kwargs) -> None"
            },
            {
              "name": "unsubscribe",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "event",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "callback",
                  "type": "normal",
                  "annotation": "Callable[..., None]",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Unsubscribe from an event.\n\nParameters\n----------\nevent : str\n    Event type to unsubscribe from.\ncallback : Callable[..., None]\n    Callback to remove.\n\nReturns\n-------\nbool\n    True if callback was removed, False if not found."
            },
            {
              "name": "notify",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "event",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Notify all observers of an event.\n\nParameters\n----------\nevent : str\n    Event type to notify.\n**kwargs\n    Event data passed to observers."
            },
            {
              "name": "clear_observers",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "event",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Clear all observers or observers for a specific event.\n\nParameters\n----------\nevent : str, optional\n    If provided, clear only observers for this event.\n    If None, clear all observers."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "SerializableMixin",
          "bases": [],
          "docstring": "Mixin for serializable objects.\n\nProvides methods for converting objects to/from JSON and dictionaries,\nwith support for dataclasses and custom serialization.\n\nExamples\n--------\n>>> from dataclasses import dataclass\n>>> from dynexus import SerializableMixin\n>>> @dataclass\n... class Point(SerializableMixin):\n...     x: float\n...     y: float\n>>> p = Point(1.0, 2.0)\n>>> p.to_dict()\n{'x': 1.0, 'y': 2.0}\n>>> p.to_json()\n'{\"x\": 1.0, \"y\": 2.0}'",
          "methods": [
            {
              "name": "to_dict",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Convert object to dictionary.\n\nReturns\n-------\ndict\n    Dictionary representation.\n\nNotes\n-----\nFor dataclasses, uses dataclasses.asdict().\nFor other classes, uses __dict__ but filters private attributes."
            },
            {
              "name": "_serialize_value",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "Any",
              "decorators": [],
              "is_async": false,
              "docstring": "Serialize a value for dictionary representation.\n\nParameters\n----------\nvalue : Any\n    Value to serialize.\n\nReturns\n-------\nAny\n    Serializable representation."
            },
            {
              "name": "to_json",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "indent",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Convert object to JSON string.\n\nParameters\n----------\nindent : int, optional\n    JSON indentation.\n\nReturns\n-------\nstr\n    JSON string representation."
            }
          ],
          "class_methods": [
            {
              "name": "from_dict",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "data",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "'SerializableMixin'",
              "decorators": [
                "classmethod"
              ],
              "is_async": false,
              "docstring": "Create object from dictionary.\n\nParameters\n----------\ndata : dict\n    Dictionary representation.\n\nReturns\n-------\nSerializableMixin\n    New instance.\n\nRaises\n------\nNotImplementedError\n    If class doesn't implement custom from_dict."
            },
            {
              "name": "from_json",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "json_str",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "'SerializableMixin'",
              "decorators": [
                "classmethod"
              ],
              "is_async": false,
              "docstring": "Create object from JSON string.\n\nParameters\n----------\njson_str : str\n    JSON string representation.\n\nReturns\n-------\nSerializableMixin\n    New instance."
            }
          ],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "StatefulMixin",
          "bases": [
            "Generic[T]"
          ],
          "docstring": "Mixin for objects with state that can be saved and restored.\n\nProvides checkpoint/restore functionality for Variables, Mechanisms,\nand Processes. Useful for counterfactual reasoning and optimization\nwhere state needs to be rolled back.\n\nType Parameters\n---------------\nT\n    Type of the state representation.\n\nExamples\n--------\n>>> from dynexus import StatefulMixin\n>>> class Simulation(StatefulMixin[dict[str, float]]):\n...     def __init__(self):\n...         self.time = 0.0\n...         self.values = {'x': 0.0, 'y': 0.0}\n...     def get_state(self) -> dict[str, float]:\n...         return {'time': self.time, **self.values}\n...     def set_state(self, state: dict[str, float]) -> None:\n...         self.time = state['time']\n...         self.values = {k: state[k] for k in ['x', 'y']}\n>>> sim = Simulation()\n>>> checkpoint = sim.checkpoint()\n>>> sim.time = 10.0\n>>> sim.restore(checkpoint)\n>>> sim.time\n0.0",
          "methods": [
            {
              "name": "get_state",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "T",
              "decorators": [
                "abstractmethod"
              ],
              "is_async": false,
              "docstring": "Get current state.\n\nReturns\n-------\nT\n    State representation.\n\nNotes\n-----\nMust be implemented by subclass."
            },
            {
              "name": "set_state",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "state",
                  "type": "normal",
                  "annotation": "T",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [
                "abstractmethod"
              ],
              "is_async": false,
              "docstring": "Set state from representation.\n\nParameters\n----------\nstate : T\n    State representation.\n\nNotes\n-----\nMust be implemented by subclass."
            },
            {
              "name": "checkpoint",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "T",
              "decorators": [],
              "is_async": false,
              "docstring": "Create a checkpoint of current state.\n\nReturns\n-------\nT\n    State checkpoint."
            },
            {
              "name": "restore",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "checkpoint",
                  "type": "normal",
                  "annotation": "T",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Restore state from checkpoint.\n\nParameters\n----------\ncheckpoint : T\n    State checkpoint to restore."
            },
            {
              "name": "get_state_diff",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "other_state",
                  "type": "normal",
                  "annotation": "T",
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Get difference between current state and another state.\n\nParameters\n----------\nother_state : T\n    Other state to compare with.\n\nReturns\n-------\ndict\n    Dictionary of differences.\n\nNotes\n-----\nDefault implementation assumes state is a dictionary.\nOverride for custom state types."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        }
      ],
      "functions": [],
      "global_vars": [
        {
          "name": "T",
          "annotation": null
        }
      ]
    },
    "dynexus/core/variable/reason.py": {
      "path": "dynexus/core/variable/reason.py",
      "module_docstring": "VariableReasoner for implementing reasoning capabilities on variables.\n\nThis module provides the core abstract base class for variables with autonomous\nreasoning capabilities. A VariableReasoner extends the base Variable class with\nmethods to perform analysis, anomaly detection, prediction, intervention\nplanning, and other reasoning tasks using its own historical data.\n\nThe architecture separates reasoning workflow logic (in this module) from\nstatistical computation (in math.statistics.utils), promoting code reuse and\nmaintainability across the framework.",
      "imports": [
        {
          "module": "__future__",
          "name": "annotations",
          "alias": null,
          "level": 0
        },
        {
          "module": "time",
          "alias": null
        },
        {
          "module": "abc",
          "name": "ABC",
          "alias": null,
          "level": 0
        },
        {
          "module": "abc",
          "name": "abstractmethod",
          "alias": null,
          "level": 0
        },
        {
          "module": "collections.abc",
          "name": "Callable",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "dataclass",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "field",
          "alias": null,
          "level": 0
        },
        {
          "module": "datetime",
          "name": "datetime",
          "alias": null,
          "level": 0
        },
        {
          "module": "datetime",
          "name": "timezone",
          "alias": null,
          "level": 0
        },
        {
          "module": "enum",
          "name": "Enum",
          "alias": null,
          "level": 0
        },
        {
          "module": "enum",
          "name": "auto",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "numpy",
          "alias": "np"
        },
        {
          "module": "math.statistics.utils",
          "name": "StatisticalUtils",
          "alias": null,
          "level": 3
        },
        {
          "module": "math.statistics.utils",
          "name": "align_time_series",
          "alias": null,
          "level": 3
        },
        {
          "module": "math.statistics.utils",
          "name": "correlation_analysis",
          "alias": null,
          "level": 3
        },
        {
          "module": "math.statistics.utils",
          "name": "exponential_smoothing",
          "alias": null,
          "level": 3
        },
        {
          "module": "math.statistics.utils",
          "name": "linear_prediction",
          "alias": null,
          "level": 3
        },
        {
          "module": "math.statistics.utils",
          "name": "mann_kendall_test",
          "alias": null,
          "level": 3
        },
        {
          "module": "utils.logging",
          "name": "get_logger",
          "alias": null,
          "level": 3
        },
        {
          "module": "base",
          "name": "BaseVariable",
          "alias": null,
          "level": 1
        },
        {
          "module": "base",
          "name": "VariableRole",
          "alias": null,
          "level": 1
        }
      ],
      "classes": [
        {
          "name": "ReasoningTask",
          "bases": [
            "Enum"
          ],
          "docstring": "Enumeration of reasoning task types.\n\nDefines the various types of reasoning operations that can be performed\non variables, enabling different analysis and inference capabilities.\n\nAttributes\n----------\nANOMALY_DETECTION : ReasoningTask\n    Detect unusual or unexpected values in variable data.\nCONFLICT_RESOLUTION : ReasoningTask\n    Resolve conflicting value proposals from multiple sources.\nVALUE_PREDICTION : ReasoningTask\n    Predict future values based on historical patterns.\nCAUSAL_DIAGNOSIS : ReasoningTask\n    Identify likely causes of observed variable states.\nINTERVENTION_PLANNING : ReasoningTask\n    Plan optimal interventions to achieve desired outcomes.\nUNCERTAINTY_QUANTIFICATION : ReasoningTask\n    Analyze and quantify uncertainty in variable values.\nCONSTRAINT_CHECKING : ReasoningTask\n    Verify constraints and validation rules.\nTREND_ANALYSIS : ReasoningTask\n    Analyze trends and patterns in historical data.\nPATTERN_RECOGNITION : ReasoningTask\n    Recognize recurring patterns in variable behavior.\nSENSITIVITY_ANALYSIS : ReasoningTask\n    Analyze sensitivity to parameter changes.\n\nNotes\n-----\nEach task type corresponds to a specific reasoning capability that\ncan be implemented by VariableReasoner subclasses. The enumeration\nprovides type safety and enables task-specific result handling.",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "ReasoningResult",
          "bases": [],
          "docstring": "Immutable record of a reasoning task result.\n\nCaptures the complete outcome of a reasoning operation, including\nthe task type, success status, result data, confidence metrics,\nexecution timing, and metadata. This dataclass serves as the\nstandardized return type for all reasoning methods.\n\nAttributes\n----------\ntask : ReasoningTask\n    Type of reasoning task that was performed.\nsuccess : bool\n    Whether the reasoning task completed successfully without errors.\nresult : Any\n    The primary result data. Structure varies by task type.\n    For VALUE_PREDICTION: dict with 'predicted_value' and 'prediction_bounds'\n    For CAUSAL_DIAGNOSIS: dict with 'cause_scores' mapping variable names to confidence\n    For ANOMALY_DETECTION: dict with 'is_anomaly' boolean and 'severity' float\nconfidence : float\n    Confidence in the result, normalized to [0.0, 1.0] where 1.0 is maximum confidence.\nmetadata : dict[str, Any]\n    Additional metadata about the reasoning process, including method parameters,\n    computational details, and diagnostic information.\ntimestamp : datetime\n    UTC timestamp when the reasoning was performed.\nexecution_time_ms : Optional[float]\n    Time taken to execute the reasoning task in milliseconds.\n\nExamples\n--------\n>>> from datetime import datetime, timezone\n>>> from dynexus import ReasoningResult, ReasoningTask\n>>>\n>>> # Create a successful anomaly detection result\n>>> result = ReasoningResult(\n...     task=ReasoningTask.ANOMALY_DETECTION,\n...     success=True,\n...     result={\"is_anomaly\": True, \"severity\": 0.85},\n...     confidence=0.92,\n...     metadata={\"method\": \"isolation_forest\", \"threshold\": 0.95},\n...     timestamp=datetime.now(timezone.utc),\n...     execution_time_ms=45.2\n... )\n>>>\n>>> # Create a failed prediction result\n>>> failed_result = ReasoningResult(\n...     task=ReasoningTask.VALUE_PREDICTION,\n...     success=False,\n...     result={\"error\": \"Insufficient historical data\"},\n...     confidence=0.0,\n...     metadata={\"available_points\": 1, \"required_points\": 3},\n...     timestamp=datetime.now(timezone.utc)\n... )",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "VariableReasoner",
          "bases": [
            "BaseVariable",
            "ABC"
          ],
          "docstring": "Abstract base class for variable reasoning capabilities.\n\nA VariableReasoner is a specialized Variable that can perform autonomous\nreasoning operations on its own state and history. It inherits all Variable\ncapabilities while adding advanced methods for analysis, prediction, diagnosis,\nand intervention planning.\n\nThe class follows a modular architecture where reasoning workflows are\nimplemented here, while statistical computations are delegated to the\n`dynexus.math.statistics.utils` module. This separation of concerns\npromotes code reuse and maintainability.\n\nKey Features\n------------\n1. **Self-Contained Reasoning**: Uses the variable's own history for analysis\n2. **Multiple Reasoning Modalities**: Supports prediction, diagnosis, intervention, etc.\n3. **Results History**: Tracks all reasoning operations with timestamps and confidence\n4. **Configurable Parameters**: Allows tuning of reasoning algorithms via configuration\n5. **Integration with Variable System**: Full compatibility with base Variable state management\n\nSubclassing Notes\n----------------\n- **Must implement**: `detect_anomaly()` and `resolve_conflict()` (abstract methods)\n- **Should implement**: Domain-specific versions of other reasoning methods as needed\n- **Configuration**: Use `_setup_default_config()` to establish default parameters\n- **Result Recording**: Always call `record_result()` after each reasoning operation\n\nExamples\n--------\n>>> from dynexus import VariableReasoner, ReasoningTask, VariableRole\n>>>\n>>> class TemperatureAnalyzer(VariableReasoner):\n...     def detect_anomaly(self) -> ReasoningResult:\n...         # Custom anomaly detection for temperature data\n...         history = self.get_history(n=20)\n...         values = [val for _, val, _ in history if val is not None]\n...\n...         if len(values) < 5:\n...             return self._create_failed_result(\n...                 task=ReasoningTask.ANOMALY_DETECTION,\n...                 error=\"Insufficient temperature data\"\n...             )\n...\n...         # Custom anomaly logic using domain knowledge\n...         current_temp = self.value\n...         avg_temp = sum(values) / len(values)\n...         is_anomaly = abs(current_temp - avg_temp) > 5.0  # 5°C threshold\n...\n...         result = ReasoningResult(\n...             task=ReasoningTask.ANOMALY_DETECTION,\n...             success=True,\n...             result={\"is_anomaly\": is_anomaly, \"severity\": 0.7},\n...             confidence=0.85,\n...             metadata={\"method\": \"temperature_threshold\", \"threshold\": 5.0}\n...         )\n...         self.record_result(result)\n...         return result\n...\n...     def resolve_conflict(self, proposals):\n...         # Custom conflict resolution for temperature readings\n...         # ... implementation ...\n...         pass\n...\n...     def validate(self, value: float) -> bool:\n...         return 0 <= value <= 100\n>>>\n>>> # Create and use the reasoner\n>>> temp_reasoner = TemperatureAnalyzer(\n...     name=\"room_temperature\",\n...     description=\"Analyzes room temperature patterns and anomalies\",\n...     units=\"°C\",\n...     role=VariableRole.ENDOGENOUS,\n...     initial_value=22.0,\n...     initial_confidence=0.8\n... )\n>>>\n>>> # Update with temperature readings\n>>> temp_reasoner.update(22.5, confidence=0.9)\n>>> temp_reasoner.update(23.0, confidence=0.85)\n>>> temp_reasoner.update(23.5, confidence=0.8)\n>>> temp_reasoner.update(30.0, confidence=0.7)  # Possibly anomalous\n>>>\n>>> # Perform reasoning\n>>> anomaly_result = temp_reasoner.detect_anomaly()\n>>> if anomaly_result.success and anomaly_result.result[\"is_anomaly\"]:\n...     print(f\"Temperature anomaly detected with confidence {anomaly_result.confidence:.2f}\")\n...     print(f\"Severity: {anomaly_result.result['severity']:.2f}\")",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "name",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "description",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "units",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                },
                {
                  "name": "role",
                  "type": "normal",
                  "annotation": "VariableRole",
                  "default": null
                },
                {
                  "name": "initial_value",
                  "type": "normal",
                  "annotation": "Any | None",
                  "default": null
                },
                {
                  "name": "initial_confidence",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "config",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                }
              ],
              "returns": null,
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize a VariableReasoner instance.\n\nExtends the base Variable initialization with reasoning-specific\nattributes and configuration setup.\n\nParameters\n----------\nname : str\n    Unique name identifier for the reasoner/variable.\ndescription : str, optional\n    Human-readable description of what the reasoner does and represents.\nunits : Optional[str], optional\n    Measurement units for the variable's values (e.g., \"°C\", \"m/s\", \"USD\").\nrole : VariableRole, optional\n    Causal role of the variable within the system (ENDOGENOUS, EXOGENOUS, etc.).\ninitial_value : Optional[Any], optional\n    Initial value for the variable. Must be compatible with validation rules.\ninitial_confidence : float, optional\n    Initial confidence in the value, normalized to [0.0, 1.0].\nconfig : Optional[dict[str, Any]], optional\n    Configuration dictionary for reasoning parameters. Will be merged with\n    defaults from `_setup_default_config()`.\n\nRaises\n------\nValueError\n    If `initial_confidence` is not in the range [0.0, 1.0].\n\nNotes\n-----\nThe initialization process:\n1. Calls parent BaseVariable.__init__ with all standard variable parameters\n2. Initializes reasoning-specific attributes (results_history, reasoning_config)\n3. Sets up default configuration values for all reasoning tasks\n4. Logs the initialization for debugging and monitoring\n\nExamples\n--------\n>>> from dynexus import VariableReasoner, VariableRole\n>>>\n>>> # Basic initialization\n>>> class SmartPressureReasoner(VariableReasoner):\n...     def __init__(\n...             self,\n...             name,\n...             description = \"\",\n...             units = None,\n...             initial_value = None,\n...             initial_confidence = 0.5,\n...             config = None\n...         ):\n...             super().__init__(\n...                 name,\n...                 description,\n...                 units,\n...                 VariableRole.ENDOGENOUS,\n...                 initial_value,\n...                 initial_confidence,\n...                 config\n...             )\n...\n...     def detect_anomaly(self):\n...         return super().detect_anomaly()\n...\n...     def resolve_conflict(self, proposals):\n...         return super().resolve_conflict(proposals)\n...\n...     def validate(self, value: float) -> bool:\n...         return 0 <= value <= 200_000\n>>>\n>>> reasoner = SmartPressureReasoner(\n...     name=\"pressure_sensor\",\n...     description=\"Monitors and analyzes pressure readings\",\n...     units=\"Pa\",\n...     initial_value=101325.0,\n...     initial_confidence=0.9,\n... )\n>>>\n>>> # With custom reasoning configuration\n>>> custom_config = {\n...     \"prediction\": {\"method\": \"exponential\", \"max_history\": 100},\n...     \"anomaly_detection\": {\"threshold\": 2.5}\n... }\n>>> configured_reasoner = SmartPressureReasoner(\n...     name=\"custom_analyzer\",\n...     config=custom_config\n... )"
            },
            {
              "name": "_setup_default_config",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Set up default configuration for reasoning tasks.\n\nEstablishes sensible defaults for all reasoning parameters. User-provided\nconfiguration (from __init__) takes precedence over these defaults.\n\nThe configuration is organized hierarchically by reasoning task category:\n- anomaly_detection: Parameters for anomaly detection algorithms\n- prediction: Parameters for value prediction methods\n- trend_analysis: Parameters for trend detection and analysis\n- uncertainty: Parameters for uncertainty quantification\n- constraints: Domain-specific constraint definitions\n\nNotes\n-----\nDefault values are chosen based on general statistical practice:\n- Z-score threshold of 3.0 corresponds to ~99.7% confidence for normal data\n- Bootstrap sample size of 1000 provides stable confidence intervals\n- Exponential smoothing alpha=0.3 balances responsiveness and stability\n\nSubclasses should override this method to provide domain-specific defaults."
            },
            {
              "name": "detect_anomaly",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "ReasoningResult",
              "decorators": [
                "abstractmethod"
              ],
              "is_async": false,
              "docstring": "Detect anomalies in the variable's own historical values.\n\nThis abstract method must be implemented by all subclasses to provide\ndomain-specific anomaly detection logic. Implementations should analyze\nthe variable's history (available via `self.get_history()`) to identify\nunusual patterns, outliers, or unexpected behavior.\n\nReturns\n-------\nReasoningResult\n    Result containing anomaly detection outcome. The `result` field should\n    be a dictionary with at minimum:\n    - 'is_anomaly': bool indicating if an anomaly was detected\n    - 'severity': float in [0.0, 1.0] indicating anomaly severity\n    Additional fields may include:\n    - 'anomaly_type': str categorizing the anomaly\n    - 'expected_range': tuple of (lower, upper) expected values\n    - 'deviation': float magnitude of deviation from expected\n\nRaises\n------\nNotImplementedError\n    If a subclass does not implement this method.\n\nNotes\n-----\nCommon anomaly detection approaches include:\n- Statistical methods: Z-score, modified Z-score, IQR\n- Machine learning: Isolation Forest, One-Class SVM, Autoencoders\n- Time-series specific: STL decomposition, seasonal ESD\n- Domain-specific: Rule-based systems, physical constraints\n\nThe method should handle edge cases gracefully:\n- Insufficient historical data (return failed result)\n- Non-numeric data types (implement appropriate detection)\n- Missing values (impute or handle explicitly)\n\nExamples\n--------\n>>> class ZScoreAnomalyDetector(VariableReasoner):\n...     def detect_anomaly(self) -> ReasoningResult:\n...         import time\n...         start_time = time.time()\n...\n...         try:\n...             # Get configuration\n...             threshold = self.reasoning_config['anomaly_detection']['threshold']\n...             window = self.reasoning_config['anomaly_detection']['window_size']\n...\n...             # Get historical data\n...             history = self.get_history(n=window)\n...             
if len(history) < 2:\n...                 return self._create_failed_result(\n...                     task=ReasoningTask.ANOMALY_DETECTION,\n...                     error=\"Insufficient history for anomaly detection\"\n...                 )\n...\n...             # Extract values\n...             values = [val for _, val, _ in history if val is not None]\n...             current_value = self.value\n...\n...             # Calculate z-score\n...             mean_val = np.mean(values)\n...             std_val = np.std(values) if len(values) > 1 else 0.0\n...\n...             if std_val > 0:\n...                 z_score = abs((current_value - mean_val) / std_val)\n...                 is_anomaly = z_score > threshold\n...                 severity = min(1.0, z_score / (threshold * 2))\n...             else:\n...                 z_score = 0.0\n...                 is_anomaly = False\n...                 severity = 0.0\n...\n...             execution_time = (time.time() - start_time) * 1000\n...\n...             result = ReasoningResult(\n...                 task=ReasoningTask.ANOMALY_DETECTION,\n...                 success=True,\n...                 result={\n...                     \"is_anomaly\": is_anomaly,\n...                     \"severity\": severity,\n...                     \"z_score\": z_score\n...                 },\n...                 confidence=0.9 if std_val > 0 else 0.5,\n...                 metadata={\n...                     \"method\": \"z_score\",\n...                     \"threshold\": threshold,\n...                     \"window_size\": window,\n...                     \"execution_time_ms\": execution_time\n...                 },\n...                 execution_time_ms=execution_time\n...             )\n...\n...             self.record_result(result)\n...             return result\n...\n...         except Exception as e:\n...             logger.error(f\"Anomaly detection failed: {str(e)}\")\n...             return self._create_failed_result(\n...        
         task=ReasoningTask.ANOMALY_DETECTION,\n...                 error=str(e)\n...             )"
            },
            {
              "name": "resolve_conflict",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposals",
                  "type": "normal",
                  "annotation": "list[tuple[Any, float, str]]",
                  "default": null
                }
              ],
              "returns": "ReasoningResult",
              "decorators": [
                "abstractmethod"
              ],
              "is_async": false,
              "docstring": "Resolve conflicts between multiple value proposals.\n\nThis abstract method must be implemented by all subclasses to provide\nconflict resolution logic when multiple sources propose different values\nfor this variable. This is particularly important in multi-agent systems\nor when integrating data from heterogeneous sources.\n\nParameters\n----------\nproposals : list[tuple[Any, float, str]]\n    List of proposed values as tuples of:\n    - value: The proposed value (any type)\n    - confidence: Confidence in the proposal [0.0, 1.0]\n    - source_name: Identifier for the proposal source\n\nReturns\n-------\nReasoningResult\n    Result containing the resolved value. The `result` field should\n    be a dictionary with at minimum:\n    - 'resolved_value': The chosen value after conflict resolution\n    - 'resolution_method': str describing the method used\n    - 'source_contributions': dict mapping source names to their influence\n    Additional fields may include:\n    - 'consensus_degree': float indicating level of agreement\n    - 'rejected_proposals': list of proposals not selected\n\nRaises\n------\nValueError\n    If `proposals` list is empty.\nNotImplementedError\n    If a subclass does not implement this method.\n\nNotes\n-----\nCommon conflict resolution strategies include:\n- **Weighted Average**: Average weighted by confidence scores\n- **Maximum Confidence**: Select proposal with highest confidence\n- **Majority Voting**: For discrete/categorical values\n- **Temporal Recency**: Weight by timestamp or update frequency\n- **Expert Priority**: Apply source-specific priority rules\n- **Consensus Building**: Iterative negotiation toward agreement\n\nThe method should consider:\n- Value compatibility (can values be meaningfully combined?)\n- Source reliability (historical accuracy of each source)\n- Contextual factors (current system state, constraints)\n\nExamples\n--------\n>>> class WeightedConflictResolver(VariableReasoner):\n...  
   def resolve_conflict(self, proposals):\n...         import time\n...         start_time = time.time()\n...\n...         if not proposals:\n...             raise ValueError(\"Empty proposals list\")\n...\n...         # Separate components\n...         values = [p[0] for p in proposals]\n...         confidences = [p[1] for p in proposals]\n...         sources = [p[2] for p in proposals]\n...\n...         # Check if all values are numeric\n...         try:\n...             numeric_values = [float(v) for v in values]\n...\n...             # Weighted average resolution\n...             total_weight = sum(confidences)\n...             if total_weight > 0:\n...                 resolved = sum(v * c for v, c in zip(numeric_values, confidences)) / total_weight\n...                 resolution_method = \"confidence_weighted_average\"\n...             else:\n...                 resolved = np.mean(numeric_values)\n...                 resolution_method = \"simple_average\"\n...\n...             # Calculate consensus metrics\n...             source_contributions = {\n...                 source: conf / total_weight if total_weight > 0 else 1.0/len(confidences)\n...                 for source, conf in zip(sources, confidences)\n...             }\n...\n...             execution_time = (time.time() - start_time) * 1000\n...\n...             result = ReasoningResult(\n...                 task=ReasoningTask.CONFLICT_RESOLUTION,\n...                 success=True,\n...                 result={\n...                     \"resolved_value\": resolved,\n...                     \"resolution_method\": resolution_method,\n...                     \"source_contributions\": source_contributions,\n...                     \"consensus_degree\": min(confidences) / max(confidences) if max(confidences) > 0 else 0.0\n...                 },\n...                 confidence=np.mean(confidences),\n...                 metadata={\n...                     \"proposal_count\": len(proposals),\n...                
     \"execution_time_ms\": execution_time,\n...                     \"value_type\": \"numeric\"\n...                 },\n...                 execution_time_ms=execution_time\n...             )\n...\n...             self.record_result(result)\n...             return result\n...\n...         except (TypeError, ValueError):\n...             # Non-numeric values: use maximum confidence strategy\n...             max_idx = np.argmax(confidences)\n...             resolved = values[max_idx]\n...\n...             execution_time = (time.time() - start_time) * 1000\n...\n...             result = ReasoningResult(\n...                 task=ReasoningTask.CONFLICT_RESOLUTION,\n...                 success=True,\n...                 result={\n...                     \"resolved_value\": resolved,\n...                     \"resolution_method\": \"maximum_confidence\",\n...                     \"selected_source\": sources[max_idx],\n...                     \"selected_confidence\": confidences[max_idx]\n...                 },\n...                 confidence=confidences[max_idx],\n...                 metadata={\n...                     \"proposal_count\": len(proposals),\n...                     \"execution_time_ms\": execution_time,\n...                     \"value_type\": \"non_numeric\"\n...                 },\n...                 execution_time_ms=execution_time\n...             )\n...\n...             self.record_result(result)\n...             return result"
            },
            {
              "name": "predict_value",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "steps_ahead",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "ReasoningResult",
              "decorators": [],
              "is_async": false,
              "docstring": "Predict future values based on the variable's historical data.\n\nUses statistical methods from `dynexus.math.statistics.utils` to\nforecast future values. The default implementation supports both\nlinear regression and exponential smoothing, with automatic fallback\nto simpler methods when data is insufficient.\n\nParameters\n----------\nsteps_ahead : int, optional\n    Number of time steps to predict into the future. Must be ≥ 1.\n**kwargs\n    Additional prediction parameters:\n    - method : str\n        Prediction method: 'linear' (default) or 'exponential'\n    - confidence_interval : float\n        Confidence level for prediction bounds (default: 0.95)\n    - max_history : int\n        Maximum number of historical points to use (default: from config)\n    - alpha : float\n        Smoothing factor for exponential method (default: 0.3)\n\nReturns\n-------\nReasoningResult\n    Result containing prediction outcome. The `result` field is a\n    dictionary with:\n    - 'predicted_value': The forecasted future value\n    - 'prediction_bounds': Tuple of (lower_bound, upper_bound)\n    - Additional method-specific statistics (slope, r_squared, etc.)\n\nRaises\n------\nValueError\n    If `steps_ahead` is less than 1.\n\nNotes\n-----\n**Method Selection**:\n- 'linear': Linear regression - best for trends with constant slope\n- 'exponential': Exponential smoothing - best for noisy data with local trends\n\n**Data Requirements**:\n- Linear regression: Minimum 2 data points\n- Exponential smoothing: Minimum 2 data points (more for stable estimates)\n\n**Confidence Intervals**:\nConfidence bounds are computed differently for each method:\n- Linear: Based on t-distribution of regression residuals\n- Exponential: Based on smoothed residual standard deviation\n\nThe method automatically handles insufficient data by returning a\nfailed result with appropriate metadata.\n\nExamples\n--------\n>>> from dynexus import VariableReasoner\n>>>\n>>> 
reasoner = VariableReasoner(name=\"stock_price\")\n>>> reasoner.update(100.0); reasoner.update(101.0); reasoner.update(102.5)\n>>>\n>>> # Linear prediction\n>>> linear_result = reasoner.predict_value(steps_ahead=2, method='linear')\n>>> if linear_result.success:\n...     pred = linear_result.result['predicted_value']\n...     lower, upper = linear_result.result['prediction_bounds']\n...     print(f\"Predicted: {pred:.2f} (95% CI: [{lower:.2f}, {upper:.2f}])\")\n...\n>>> # Exponential smoothing prediction\n>>> exp_result = reasoner.predict_value(steps_ahead=1, method='exponential', alpha=0.2)\n>>> if exp_result.success:\n...     print(f\"Exponential prediction: {exp_result.result['predicted_value']:.2f}\")\n...     print(f\"Confidence: {exp_result.confidence:.2f}\")"
            },
            {
              "name": "diagnose_causes",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "candidate_causes",
                  "type": "normal",
                  "annotation": "list[BaseVariable]",
                  "default": null
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "ReasoningResult",
              "decorators": [],
              "is_async": false,
              "docstring": "Diagnose likely causes of the variable's current state.\n\nAnalyzes statistical relationships between this variable and candidate\ncause variables using correlation analysis, Granger causality (proxy),\nor other relationship metrics. The method aligns time series, computes\nrelationship scores, and returns normalized confidence scores for each\ncandidate cause.\n\nParameters\n----------\ncandidate_causes : list[BaseVariable]\n    List of candidate cause variables to analyze. Each must be a Variable\n    instance with accessible history via `get_history()`.\n**kwargs\n    Additional diagnostic parameters:\n    - method : str\n        Relationship metric: 'correlation'\n        (default), 'granger', 'transfer_entropy'\n    - significance_level : float\n        Statistical significance threshold (default: 0.05)\n    - lag_max : int\n        Maximum lag for time series analysis (default: 5)\n    - min_aligned_points : int\n        Minimum aligned data points required (default: 3)\n    - history_points\n        History points (default: 50)\n\nReturns\n-------\nReasoningResult\n    Result containing causal diagnosis. The `result` field is a\n    dictionary with:\n    - 'cause_scores': dict mapping variable names to normalized\n      confidence scores [0, 1]\n    - 'cause_details': dict with additional information per candidate\n    - 'top_cause': name of the variable with highest score (or None)\n\nNotes\n-----\n**Relationship Metrics**:\n- 'correlation': Pearson correlation with significance testing\n- 'granger': Simplified Granger causality proxy (cross-correlation based)\n- 'transfer_entropy': Currently uses correlation as proxy (placeholder)\n\n**Method Details**:\n1. Time series alignment using `align_time_series()` from statistics module\n2. Relationship score calculation using chosen metric\n3. Score normalization to [0, 1] range\n4. 
Confidence based on maximum relationship score\n\n**Limitations**:\n- This provides **correlational evidence**, not causal proof\n- True causal inference requires domain knowledge and experimental design\n- The 'granger' method is a simplified proxy, not full Granger causality\n\n**Best Practices**:\n- Use domain knowledge to select plausible candidate causes\n- Consider temporal ordering (causes should precede effects)\n- Account for potential confounding variables\n- Use multiple methods and triangulate results\n\nExamples\n--------\n>>> from dynexus import BaseVariable\n>>>\n>>> # Create candidate causes\n>>> class VariableCause(BaseVariable):\n...     def validate(self, value: float) -> bool:\n...         return 0 <= value <= 100\n>>> cause1 = VariableCause(name=\"advertising\", initial_value=50)\n>>> cause2 = VariableCause(name=\"competition\", initial_value=30)\n>>> cause3 = VariableCause(name=\"seasonality\", initial_value=0.5)\n>>>\n>>> # Update histories (simulated data)\n>>> for i in range(20):\n...     reasoner.update(100 + i*5 + np.random.normal(0, 10))\n...     cause1.update(50 + i*2 + np.random.normal(0, 5))\n...     cause2.update(30 + np.random.normal(0, 3))\n...     cause3.update(0.5 + 0.1*np.sin(i/3.14))\n...\n>>> # Diagnose causes\n>>> candidates = [cause1, cause2, cause3]\n>>> result = reasoner.diagnose_causes(candidates, method='correlation')\n>>>\n>>> if result.success:\n...     print(\"Cause scores:\")\n...     for cause, score in result.result['cause_scores'].items():\n...         print(f\"  {cause}: {score:.3f}\")\n...     top_cause = result.result['top_cause']\n...     print(f\"Top candidate: {top_cause}\")"
            },
            {
              "name": "plan_intervention",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "target_value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                },
                {
                  "name": "constraints",
                  "type": "normal",
                  "annotation": "list[Callable[[Any], bool]] | None",
                  "default": null
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "ReasoningResult",
              "decorators": [],
              "is_async": false,
              "docstring": "Plan an intervention to achieve a target value.\n\nCreates a structured intervention plan considering the current state,\ndistance to target, constraints, risk factors, and system dynamics.\nThe plan includes step-by-step actions, timing recommendations,\nconfidence estimates, and risk assessments.\n\nParameters\n----------\ntarget_value : Any\n    Desired target value for the variable. Must be valid according to\n    the variable's validation rules.\nconstraints : Optional[list[Callable[[Any], bool]]]\n    List of constraint functions that must be satisfied. Each function\n    should accept a candidate value and return True if the constraint\n    is satisfied. Constraints are checked for both the target value\n    and intermediate steps.\n**kwargs\n    Additional planning parameters:\n    - max_steps : int\n        Maximum number of intervention steps (default: 3)\n    - risk_tolerance : float\n        Acceptable risk level [0.0, 1.0] (default: 0.3)\n    - urgency : str\n        Intervention urgency: 'low', 'medium' (default), or 'high'\n    - auto_execute : bool\n        Automatically execute the plan if confidence > 0.7 (default: False)\n    - required_confidence : float\n        Minimum confidence required for target state (default: 0.8)\n\nReturns\n-------\nReasoningResult\n    Result containing intervention plan. The `result` field is a\n    dictionary with:\n    - 'intervention_steps': List of step dictionaries with actions and targets\n    - 'current_state': Snapshot of current variable state\n    - 'target_state': Description of desired end state\n    - 'estimated_risk': Risk assessment with acceptability\n    - 'expected_duration': Estimated time to complete intervention\n    - 'monitoring_requirements': Suggested monitoring parameters\n\nNotes\n-----\n**Planning Process**:\n1. **Validation**: Check target value validity and constraint satisfaction\n2. 
**Distance Calculation**: Compute normalized distance from current to target\n3. **Step Generation**: Create gradual steps based on distance and complexity\n4. **Risk Assessment**: Estimate risks considering distance and urgency\n5. **Confidence Calculation**: Compute overall plan confidence\n\n**Step Types**:\n- 'direct_set': Single-step immediate change (for small distances)\n- 'gradual_adjustment': Multi-step incremental changes\n- 'stabilization': Include stabilization periods between changes\n\n**Risk Factors**:\n- Value distance (larger changes = higher risk)\n- System volatility (historical stability)\n- Constraint complexity (number and strictness of constraints)\n- Urgency requirements (higher urgency may increase risk)\n\n**Automatic Execution**:\nWhen `auto_execute=True` and plan confidence > 0.7, the plan is\nautomatically executed by calling `_execute_intervention_plan()`.\nThis should be used cautiously in production systems.\n\nExamples\n--------\n>>>\n>>> reasoner.add_constraint(lambda x: 250 <= x <= 400)  # Safety constraint\n>>>\n>>> # Plan intervention to reach 350 degrees\n>>> result = reasoner.plan_intervention(\n...     target_value=350.0,\n...     constraints=[lambda x: x <= 380],  # Additional constraint\n...     max_steps=4,\n...     risk_tolerance=0.2,\n...     urgency='medium'\n... )\n>>>\n>>> if result.success:\n...     plan = result.result\n...     print(f\"Plan confidence: {result.confidence:.2f}\")\n...     print(f\"Steps: {len(plan['intervention_steps'])}\")\n...     print(f\"Estimated risk: {plan['estimated_risk']['risk_level']}\")\n...     print(f\"Acceptable: {plan['estimated_risk']['acceptable']}\")\n...\n...     # Execute the plan manually\n...     if result.confidence > 0.8:\n...         reasoner._execute_intervention_plan(plan)"
            },
            {
              "name": "quantify_uncertainty",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "ReasoningResult",
              "decorators": [],
              "is_async": false,
              "docstring": "Quantify and analyze uncertainty in the variable's value.\n\nPerforms comprehensive uncertainty analysis using methods from the\nstatistics module. Includes bootstrap analysis for sufficient data\nor basic statistical analysis for smaller samples. Provides confidence\nintervals, uncertainty sources, and recommendations for uncertainty\nreduction.\n\nParameters\n----------\n**kwargs\n    Additional uncertainty analysis parameters:\n    - method : str\n        Analysis method: 'bootstrap' (default for n≥10), 'statistical'\n    - confidence_level : float\n        Confidence interval level (default: 0.95)\n    - samples : int\n        Number of bootstrap samples (default: 1000)\n    - include_current : bool\n        Include current state in analysis (default: True)\n\nReturns\n-------\nReasoningResult\n    Result containing uncertainty analysis. The `result` field is a\n    dictionary with:\n    - Analysis results (bootstrap or statistical)\n    - 'confidence_distribution': Statistics on historical confidence values\n    - 'current_state': Snapshot of current variable state with uncertainty\n    - 'recommendations': Suggestions for uncertainty reduction\n\nNotes\n-----\n**Analysis Methods**:\n- **Bootstrap Analysis**: Resampling-based confidence intervals.\n  Requires ≥10 data points. More robust to non-normality.\n- **Statistical Analysis**: Traditional confidence intervals based on\n  normality assumption. Used when data <10 points.\n\n**Output Metrics**:\n- Confidence intervals for the variable's mean value\n- Standard error and coefficient of variation\n- Historical confidence distribution statistics\n- Current uncertainty level relative to historical patterns\n\n**Confidence Calculation**:\nOverall confidence in the uncertainty analysis combines:\n1. Sample size adequacy (more data = higher confidence)\n2. Interval width (narrower intervals = higher confidence)\n3. 
Consistency of confidence values over time\n\n**Recommendations**:\nThe analysis may suggest actions to reduce uncertainty:\n- Collect more data points\n- Increase measurement frequency\n- Improve measurement precision\n- Investigate sources of high variability\n\nExamples\n--------\n>>> import numpy as np\n>>>\n>>> # Generate simulated historical data with varying confidence\n>>> for i in range(25):\n...     value = 10.0 + np.random.normal(0, 2.0)  # Mean 10, std 2\n...     confidence = 0.7 + 0.3 * np.random.random()  # Varying confidence\n...     reasoner.update(value, confidence=confidence)\n...\n>>> # Perform uncertainty quantification\n>>> result = reasoner.quantify_uncertainty(\n...     method='bootstrap',\n...     confidence_level=0.90,\n...     samples=2000\n... )\n>>>\n>>> if result.success:\n...     analysis = result.result\n...     print(f\"Mean estimate: {analysis.get('mean_estimate', 'N/A'):.3f}\")\n...     print(f\"90% Confidence Interval: {analysis.get('confidence_interval', (0, 0))}\")\n...     print(f\"Analysis confidence: {result.confidence:.3f}\")\n...\n...     # Check current state\n...     current = analysis.get('current_state', {})\n...     print(f\"Current value: {current.get('value', 'N/A'):.3f}\")\n...     print(f\"Current uncertainty: {current.get('uncertainty', 'N/A'):.3f}\")"
            },
            {
              "name": "analyze_trends",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "window",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "ReasoningResult",
              "decorators": [],
              "is_async": false,
              "docstring": "Analyze trends in the variable's historical data.\n\nPerforms comprehensive trend analysis using methods from the statistics\nmodule. Supports linear regression, Mann-Kendall test, and seasonal\nanalysis. Includes direction detection, strength quantification,\nstatistical significance testing, and additional analyses like\nvolatility assessment and change point detection.\n\nParameters\n----------\nwindow : Optional[int]\n    Number of recent points to analyze. If None, uses all available\n    history up to a reasonable limit (100 points).\n**kwargs\n    Additional trend analysis parameters:\n    - method : str\n        Trend detection method: 'linear', 'mann_kendall', or 'seasonal'\n    - seasonality_period : int\n        Period for seasonal analysis (default: 24 for hourly data)\n    - detect_changepoints : bool\n        Whether to detect change points (default: False)\n    - significance_level : float\n        Statistical significance threshold (default: 0.05)\n\nReturns\n-------\nReasoningResult\n    Result containing trend analysis. The `result` field is a dictionary\n    with comprehensive trend information including:\n    - 'direction': Trend direction (increasing, decreasing, no trend)\n    - 'strength': Normalized trend strength [0.0, 1.0]\n    - 'significant': Whether trend is statistically significant\n    - Method-specific statistics (slope, p-value, etc.)\n    - Optional: 'volatility', 'change_points', 'seasonal' components\n\nNotes\n-----\n**Method Selection Guidelines**:\n- **Linear Regression**: Best for continuous, approximately linear trends.\n  Provides slope, intercept, and R² metrics.\n- **Mann-Kendall**: Non-parametric, robust to outliers and non-normality.\n  Good for monotonic trends.\n- **Seasonal Analysis**: For data with periodic patterns. 
Removes seasonal\n  component before trend analysis.\n\n**Additional Analyses**:\n- **Volatility**: Measures stability/variability of the time series\n- **Change Points**: Detects significant shifts in mean or variance\n- **Seasonality**: Identifies and quantifies periodic patterns\n\n**Statistical Significance**:\nSignificance testing depends on the method:\n- Linear: p-value from t-test of slope ≠ 0\n- Mann-Kendall: p-value from normal approximation of S statistic\n- Seasonal: Significance of detrended series\n\n**Confidence Calculation**:\nOverall confidence in trend analysis considers:\n1. Sample size adequacy\n2. Statistical significance\n3. Trend strength magnitude\n4. Consistency with visual inspection (if available)\n\nExamples\n--------\n>>> import numpy as np\n>>>\n>>> # Generate upward trending data with some noise\n>>> for i in range(30):\n...     trend_value = 1000 + i * 50  # Upward trend\n...     noise = np.random.normal(0, 100)  # Random noise\n...     reasoner.update(trend_value + noise)\n...\n>>> # Analyze trend using linear regression\n>>> linear_result = reasoner.analyze_trends(method='linear')\n>>> if linear_result.success:\n...     trend = linear_result.result\n...     print(f\"Direction: {trend['direction']}\")\n...     print(f\"Strength: {trend['strength']:.3f}\")\n...     print(f\"Significant: {trend['significant']}\")\n...     print(f\"Slope: {trend.get('slope', 'N/A'):.3f}\")\n...     print(f\"R²: {trend.get('r_squared', 'N/A'):.3f}\")\n...\n>>> # Analyze with Mann-Kendall (non-parametric)\n>>> mk_result = reasoner.analyze_trends(method='mann_kendall')\n>>> if mk_result.success:\n...     print(f\"Mann-Kendall p-value: {mk_result.result.get('p_value', 'N/A'):.4f}\")\n...\n>>> # Analyze with change point detection\n>>> cp_result = reasoner.analyze_trends(method='linear', detect_changepoints=True)\n>>> if cp_result.success and 'change_points' in cp_result.result:\n...     
print(f\"Detected {len(cp_result.result['change_points'])} change points\")"
            },
            {
              "name": "check_constraints",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "ReasoningResult",
              "decorators": [],
              "is_async": false,
              "docstring": "Check all constraints and validators on the variable.\n\nPerforms comprehensive constraint checking including:\n- Hard constraints (must always satisfy)\n- Soft constraints (should satisfy, warnings allowed)\n- Validation rules (data type, range, format)\n- Domain-specific constraints from configuration\n\nReturns detailed violation reports, satisfaction metrics,\nand recommendations for constraint resolution.\n\nParameters\n----------\n**kwargs\n    Additional constraint checking parameters:\n    - include_warnings : bool\n        Include warning-level violations in results (default: False)\n    - severity_threshold : float\n        Minimum severity to report (default: 0.0 = report all)\n    - check_domain_constraints : bool\n        Check domain-specific constraints from config (default: True)\n\nReturns\n-------\nReasoningResult\n    Result containing constraint checking outcome. The `result` field\n    is a dictionary with:\n    - 'status': Overall constraint status\n    - 'satisfaction_score': Proportion of satisfied constraints [0.0, 1.0]\n    - 'violations': List of violated constraints with details\n    - 'warnings': List of warning-level issues (if include_warnings=True)\n    - 'statistics': Counts and summaries of constraint checking\n\nNotes\n-----\n**Constraint Types**:\n1. **Hard Constraints**: From `self.constraints`. Must always be satisfied.\n   Violations indicate invalid state.\n2. **Validators**: From `self.validators`. Data validation rules.\n3. **Domain Constraints**: From `self.reasoning_config['constraints']`.\n   Configurable, can be marked as 'error' or 'warning'.\n\n**Evaluation Process**:\n1. Each constraint function is called with current value\n2. Exceptions during evaluation are caught and reported as evaluation failures\n3. Results are categorized by constraint type and severity\n4. 
Statistics are computed for overall assessment\n\n**Satisfaction Score**:\nCalculated as: `satisfied_constraints / total_constraints`\n- 1.0: All constraints satisfied\n- 0.0: All constraints violated\n- Intermediate: Partial satisfaction\n\n**Confidence Calculation**:\nConfidence in constraint checking results is based on:\n- Satisfaction score (higher = higher confidence)\n- Absence of evaluation errors\n- Completeness of checking (all constraints evaluated)\n\nExamples\n--------\n>>>\n>>> # Add some constraints\n>>> reasoner.add_constraint(lambda x: 0 <= x <= 100)  # Physical range\n>>> reasoner.add_constraint(lambda x: x != 37.0)      # Avoid specific value\n>>> reasoner.add_validator(lambda x: isinstance(x, (int, float)))  # Type check\n>>>\n>>> # Check constraints with current value (25.0)\n>>> result = reasoner.check_constraints()\n>>> if result.success:\n...     print(f\"Satisfaction score: {result.result['satisfaction_score']:.2f}\")\n...     print(f\"Status: {result.result['status']}\")\n...     print(f\"Violations: {len(result.result['violations'])}\")\n...     print(f\"Confidence: {result.confidence:.2f}\")\n...\n>>> # Update to a violating value should fail\n>>> reasoner.update(37.0)\n>>> violated_result = reasoner.check_constraints(include_warnings=True)\n>>> if violated_result.success:\n...     for violation in violated_result.result['violations']:\n...         print(f\"Violation: {violation.get('constraint_type')}\")\n...         print(f\"  Satisfied: {violation.get('satisfied')}\")"
            },
            {
              "name": "record_result",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "result",
                  "type": "normal",
                  "annotation": "ReasoningResult",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Record a reasoning result in the history.\n\nAdds a reasoning result to the internal history for tracking,\nanalysis, and auditing purposes. Maintains history size limits\nto prevent memory issues.\n\nParameters\n----------\nresult : ReasoningResult\n    The reasoning result to record. Should include timestamp,\n    task type, success status, and confidence metrics.\n\nNotes\n-----\n**History Management**:\n- Results are stored in chronological order (oldest to newest)\n- History size is limited by `max_results_history` configuration\n- When limit exceeded, oldest results are removed (FIFO)\n- Default limit: 1000 results (configurable in reasoning_config)\n\n**Logging**:\nEach recorded result is logged at DEBUG level with key details:\n- Task type\n- Success status\n- Confidence level\n- Execution time\n\n**Use Cases**:\n- Performance monitoring and optimization\n- Debugging reasoning algorithms\n- Auditing and compliance\n- Learning from historical reasoning patterns\n- Providing context for future reasoning tasks\n\nExamples\n--------\n>>> from datetime import datetime, timezone\n>>> from dynexus import ReasoningResult, ReasoningTask\n>>>\n>>> # Create a result\n>>> result = ReasoningResult(\n...     task=ReasoningTask.ANOMALY_DETECTION,\n...     success=True,\n...     result={\"is_anomaly\": False},\n...     confidence=0.95,\n...     timestamp=datetime.now(timezone.utc),\n...     execution_time_ms=23.5\n... )\n>>>\n>>> # Record it\n>>> reasoner.record_result(result)\n>>>\n>>> # Verify recording\n>>> recent = reasoner.get_recent_results(n=1)\n>>> print(f\"Recorded {len(recent)} result(s)\")\n>>> if recent:\n...     print(f\"Latest task: {recent[0].task.name}\")"
            },
            {
              "name": "get_recent_results",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "task",
                  "type": "normal",
                  "annotation": "ReasoningTask | None",
                  "default": null
                },
                {
                  "name": "n",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                },
                {
                  "name": "since",
                  "type": "normal",
                  "annotation": "datetime | None",
                  "default": null
                },
                {
                  "name": "min_confidence",
                  "type": "normal",
                  "annotation": "float | None",
                  "default": null
                }
              ],
              "returns": "list[ReasoningResult]",
              "decorators": [],
              "is_async": false,
              "docstring": "Get recent reasoning results with optional filtering.\n\nRetrieves reasoning results from history with flexible filtering\noptions. Results are returned in reverse chronological order\n(most recent first).\n\nParameters\n----------\ntask : Optional[ReasoningTask]\n    Filter by specific task type. If None, includes all tasks.\nn : int\n    Maximum number of results to return. Use 0 for all results.\nsince : Optional[datetime]\n    Only return results with timestamp >= this datetime.\n    Useful for retrieving results from a specific time period.\nmin_confidence : Optional[float]\n    Minimum confidence threshold. Only results with confidence\n    >= this value are returned.\n\nReturns\n-------\nlist[ReasoningResult]\n    Filtered list of reasoning results, most recent first.\n    Empty list if no results match the criteria.\n\nNotes\n-----\n**Filtering Logic**:\nFilters are applied in this order:\n1. Task type filter (if specified)\n2. Timestamp filter (if specified)\n3. Confidence filter (if specified)\n4. Limit to n results (if n > 0)\n\n**Performance**:\n- Linear time complexity O(k) where k is history size\n- Memory efficient: returns references, not copies\n- Consider using smaller n values for frequent queries\n\n**Use Cases**:\n- Monitoring recent reasoning activity\n- Debugging specific task types\n- Analyzing confidence trends over time\n- Providing context for current reasoning\n\nExamples\n--------\n>>> from datetime import datetime, timezone, timedelta\n>>> from dynexus import ReasoningTask\n>>>\n>>> # Get recent results\n>>> recent = reasoner.get_recent_results(n=5)\n>>> print(f\"Most recent {len(recent)} results:\")\n>>> for r in recent:\n...     print(f\"  {r.task.name}: success={r.success}, conf={r.confidence:.2f}\")\n>>>\n>>> # Get high-confidence anomaly detections from last hour\n>>> one_hour_ago = datetime.now(timezone.utc) - timedelta(hours=1)\n>>> anomalies = reasoner.get_recent_results(\n...     
task=ReasoningTask.ANOMALY_DETECTION,\n...     since=one_hour_ago,\n...     min_confidence=0.8\n... )\n>>> print(f\"High-confidence anomalies in last hour: {len(anomalies)}\")\n>>>\n>>> # Get all results for analysis\n>>> all_results = reasoner.get_recent_results(n=0)\n>>> print(f\"Total results in history: {len(all_results)}\")"
            },
            {
              "name": "clear_results",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "task",
                  "type": "normal",
                  "annotation": "ReasoningTask | None",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Clear reasoning results from history.\n\nRemoves reasoning results from the internal history, either\nfor a specific task type or all tasks. Useful for managing\nmemory, resetting state, or removing outdated results.\n\nParameters\n----------\ntask : Optional[ReasoningTask]\n    If specified, only clear results for this task type.\n    If None, clear all results from history.\n\nNotes\n-----\n**Clearing Behavior**:\n- Partial clearing (by task): Only removes results of specified task\n- Complete clearing (task=None): Removes all results\n- Operation is irreversible (consider backing up if needed)\n- Logs the number of cleared results for auditing\n\n**Use Cases**:\n- Freeing memory when history grows too large\n- Resetting reasoner state for new experiments\n- Removing low-quality or erroneous results\n- Preparing for different operating conditions\n\n**Best Practices**:\n- Consider keeping some history for context\n- Use task-specific clearing for targeted cleanup\n- Monitor history size and clear proactively\n- Document clearing operations in system logs\n\nExamples\n--------\n>>> from dynexus import ReasoningTask\n>>>\n>>> # Check current history size\n>>> print(f\"Results before clearing: {len(reasoner.results_history)}\")\n>>>\n>>> # Clear all results\n>>> reasoner.clear_results()\n>>> print(f\"Results after clearing all: {len(reasoner.results_history)}\")\n>>>\n>>> # Generate some new results\n>>> reasoner.predict_value(steps_ahead=1)\n>>> reasoner.analyze_trends()\n>>> reasoner.predict_value(steps_ahead=2)\n>>>\n>>> # Clear only prediction results\n>>> reasoner.clear_results(task=ReasoningTask.VALUE_PREDICTION)\n>>>\n>>> # Verify: predictions cleared, trend analysis remains\n>>> remaining = reasoner.get_recent_results(n=0)\n>>> print(f\"Remaining results: {len(remaining)}\")\n>>> for r in remaining:\n...     print(f\"  {r.task.name}\")"
            },
            {
              "name": "get_result_statistics",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Get statistics about reasoning results.\n\nComputes comprehensive statistics on the reasoning results history,\nincluding success rates, confidence distributions, task frequencies,\nand recent activity patterns.\n\nReturns\n-------\ndict[str, Any]\n    Dictionary containing detailed statistics:\n    - 'total_results': Total number of results in history\n    - 'success_rate': Overall success rate [0.0, 1.0]\n    - 'avg_confidence': Average confidence of successful results\n    - 'results_by_task': Dict with counts and metrics per task type\n    - 'recent_activity': Metrics for last 24 hours\n    - 'first_result': Timestamp of oldest result\n    - 'last_result': Timestamp of most recent result\n\nNotes\n-----\n**Statistical Metrics**:\n- **Success Rate**: Proportion of successful results\n- **Average Confidence**: Mean confidence of successful results only\n- **Task Distribution**: Breakdown by task type with per-task metrics\n- **Recent Activity**: Focus on last 24 hours for current patterns\n\n**Calculation Details**:\n- Excludes failed results from confidence averages\n- Handles empty history gracefully\n- Uses numpy for statistical computations\n- Recent activity based on UTC timestamps\n\n**Use Cases**:\n- Monitoring reasoner performance and reliability\n- Identifying problematic task types\n- Tracking confidence trends over time\n- Capacity planning based on activity levels\n- Quality assurance and validation\n\nExamples\n--------\n>>> # Get statistics\n>>> stats = reasoner.get_result_statistics()\n>>>\n>>> print(f\"Total results: {stats['total_results']}\")\n>>> print(f\"Success rate: {stats['success_rate']:.2%}\")\n>>> print(f\"Average confidence: {stats['avg_confidence']:.3f}\")\n>>>\n>>> # Analyze by task\n>>> for task_name, task_stats in stats['results_by_task'].items():\n...     print(f\"{task_name}:\")\n...     print(f\"  Count: {task_stats['count']}\")\n...     print(f\"  Success rate: {task_stats['success_rate']:.2%}\")\n... 
    print(f\"  Avg confidence: {task_stats['avg_confidence']:.3f}\")\n>>>\n>>> # Recent activity\n>>> recent = stats['recent_activity']\n>>> print(f\"Last 24 hours: {recent['last_24_hours']} results\")\n>>> print(f\"Most active task: {recent['most_active_task']}\")\n>>> print(f\"Last execution: {recent['last_execution']}\")"
            },
            {
              "name": "to_dict",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Convert reasoner to dictionary representation.\n\nReturns\n-------\ndict[str, Any]\n    Dictionary containing reasoner information including\n    base variable properties and reasoning-specific data."
            },
            {
              "name": "_extract_numeric_history",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "history",
                  "type": "normal",
                  "annotation": "list[tuple[datetime, Any, float]]",
                  "default": null
                }
              ],
              "returns": "tuple[np.ndarray, np.ndarray]",
              "decorators": [],
              "is_async": false,
              "docstring": "Extract numeric values and timestamps from history.\n\nParameters\n----------\nhistory : list[tuple[datetime, Any, float]]\n    History list from Variable.get_history()\n\nReturns\n-------\ntuple[np.ndarray, np.ndarray]\n    Tuple of (values_array, timestamps_array) where values are\n    float-converted and timestamps are Unix timestamps (seconds)."
            },
            {
              "name": "_calculate_granger_proxy",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "series1",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "series2",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [],
              "is_async": false,
              "docstring": "Simplified Granger causality proxy using cross-correlation.\n\nParameters\n----------\nseries1, series2 : np.ndarray\n    Time series to analyze.\n\nReturns\n-------\nfloat\n    Relationship score [0.0, 1.0]."
            },
            {
              "name": "_calculate_transfer_entropy_proxy",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "series1",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "series2",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [],
              "is_async": false,
              "docstring": "Transfer entropy proxy using correlation analysis.\n\nParameters\n----------\nseries1, series2 : np.ndarray\n    Time series to analyze.\n\nReturns\n-------\nfloat\n    Relationship score [0.0, 1.0].\n\nNotes\n-----\nThis is a placeholder implementation. A proper transfer entropy\ncalculation would require more sophisticated information-theoretic\nmethods."
            },
            {
              "name": "_calculate_value_distance",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value1",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                },
                {
                  "name": "value2",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [],
              "is_async": false,
              "docstring": "Calculate normalized distance between two values.\n\nParameters\n----------\nvalue1, value2 : Any\n    Values to compare.\n\nReturns\n-------\nfloat\n    Normalized distance [0.0, 1.0] where 0.0 = identical,\n    1.0 = maximally different."
            },
            {
              "name": "_create_intervention_steps",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "current",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                },
                {
                  "name": "target",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                },
                {
                  "name": "distance",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "list[dict[str, Any]]",
              "decorators": [],
              "is_async": false,
              "docstring": "Create intervention steps based on distance and complexity."
            },
            {
              "name": "_estimate_intervention_risk",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "distance",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Estimate intervention risk."
            },
            {
              "name": "_estimate_intervention_duration",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "distance",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Estimate intervention duration.\n\nParameters\n----------\ndistance : float\n    Normalized distance to target.\n**kwargs\n    Additional parameters.\n\nReturns\n-------\ndict[str, Any]\n    Duration estimates."
            },
            {
              "name": "_get_monitoring_requirements",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Get monitoring requirements for intervention.\n\nReturns\n-------\ndict[str, Any]\n    Monitoring suggestions."
            },
            {
              "name": "_calculate_plan_confidence",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "current_confidence",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "distance",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "float",
              "decorators": [],
              "is_async": false,
              "docstring": "Calculate confidence in intervention plan."
            },
            {
              "name": "_execute_intervention_plan",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "plan",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Execute an intervention plan by updating the variable."
            },
            {
              "name": "_evaluate_constraint",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "constraint",
                  "type": "normal",
                  "annotation": "Callable[[Any], bool]",
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                },
                {
                  "name": "index",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                },
                {
                  "name": "constraint_type",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Evaluate a single constraint function.\n\nParameters\n----------\nconstraint : Callable\n    Constraint function.\nvalue : Any\n    Value to test.\nindex : int\n    Constraint index.\nconstraint_type : str\n    Type of constraint.\n\nReturns\n-------\ndict[str, Any]\n    Evaluation result."
            },
            {
              "name": "_simple_average_prediction",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "values",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "steps_ahead",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Quick average-based prediction fallback.\n\nParameters\n----------\nvalues : np.ndarray\n    Historical values.\nsteps_ahead : int\n    Steps to predict ahead.\n\nReturns\n-------\ndict[str, Any]\n    Prediction result."
            },
            {
              "name": "_seasonal_trend_analysis",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "values",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "timestamps",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "seasonality_period",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Perform seasonal trend analysis.\n\nParameters\n----------\nvalues : np.ndarray\n    Time series values.\ntimestamps : np.ndarray\n    Corresponding timestamps.\nseasonality_period : int\n    Seasonal period.\n\nReturns\n-------\ndict[str, Any]\n    Trend analysis results."
            },
            {
              "name": "_linear_trend_analysis",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "values",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "timestamps",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Perform linear trend analysis.\n\nParameters\n----------\nvalues : np.ndarray\n    Time series values.\ntimestamps : np.ndarray\n    Corresponding timestamps.\n\nReturns\n-------\ndict[str, Any]\n    Trend analysis results."
            },
            {
              "name": "_create_failed_result",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "task",
                  "type": "normal",
                  "annotation": "ReasoningTask",
                  "default": null
                },
                {
                  "name": "error",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "metadata",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                },
                {
                  "name": "execution_time_ms",
                  "type": "normal",
                  "annotation": "float | None",
                  "default": null
                }
              ],
              "returns": "ReasoningResult",
              "decorators": [],
              "is_async": false,
              "docstring": "Create a standardized failed reasoning result.\n\nParameters\n----------\ntask : ReasoningTask\n    Task that failed.\nerror : str\n    Error description.\nmetadata : Optional[dict]\n    Additional metadata.\nexecution_time_ms : Optional[float]\n    Execution time.\n\nReturns\n-------\nReasoningResult\n    Failed result."
            },
            {
              "name": "__str__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Representation of the reasoner.\n\nReturns\n-------\nstr\n    Compact string representation."
            },
            {
              "name": "__repr__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Detailed representation of the reasoner.\n\nReturns\n-------\nstr\n    Detailed string representation for debugging."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        }
      ],
      "functions": [],
      "global_vars": [
        {
          "name": "logger",
          "annotation": null
        }
      ]
    },
    "dynexus/core/variable/domain.py": {
      "path": "dynexus/core/variable/domain.py",
      "module_docstring": "Domain-specific Variable implementations.",
      "imports": [
        {
          "module": "__future__",
          "name": "annotations",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "numpy",
          "alias": "np"
        },
        {
          "module": "utils.logging",
          "name": "get_logger",
          "alias": null,
          "level": 3
        },
        {
          "module": "base",
          "name": "BaseVariable",
          "alias": null,
          "level": 1
        },
        {
          "module": "base",
          "name": "VariableRole",
          "alias": null,
          "level": 1
        }
      ],
      "classes": [
        {
          "name": "ContinuousVariable",
          "bases": [
            "BaseVariable"
          ],
          "docstring": "Variable representing continuous quantities.\n\nSupports ranges, bounds, and continuous validation.\n\nExamples\n--------\n>>> from dynexus import ContinuousVariable, VariableRole\n>>>\n>>> # Create a bounded continuous variable\n>>> temperature = ContinuousVariable(\n...     name=\"temperature\",\n...     description=\"Ambient temperature\",\n...     units=\"Celsius\",\n...     role=VariableRole.ENDOGENOUS,\n...     lower_bound=-273.15,  # Absolute zero\n...     upper_bound=1000.0,   # Reasonable maximum\n...     default_value=22.0\n... )\n>>>\n>>> # Update with valid value\n>>> temperature.update(25.5, confidence=0.9)\nTrue\n>>>\n>>> # Invalid value (too low)\n>>> temperature.update(-300.0)\nFalse",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "name",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "description",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "units",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                },
                {
                  "name": "role",
                  "type": "normal",
                  "annotation": "VariableRole",
                  "default": null
                },
                {
                  "name": "lower_bound",
                  "type": "normal",
                  "annotation": "float | None",
                  "default": null
                },
                {
                  "name": "upper_bound",
                  "type": "normal",
                  "annotation": "float | None",
                  "default": null
                },
                {
                  "name": "default_value",
                  "type": "normal",
                  "annotation": "float | None",
                  "default": null
                },
                {
                  "name": "tolerance",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "config",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize a continuous variable.\n\nParameters\n----------\nname : str\n    Variable name.\ndescription : str, optional\n    Human-readable description.\nunits : str, optional\n    Physical units.\nrole : VariableRole, optional\n    Role in causal relationships.\nlower_bound : float, optional\n    Lower bound for valid values.\nupper_bound : float, optional\n    Upper bound for valid values.\ndefault_value : float, optional\n    Default value.\ntolerance : float, optional\n    Numerical tolerance for comparisons.\nconfig : dict, optional\n    Configuration dictionary."
            },
            {
              "name": "validate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate a continuous value.\n\nParameters\n----------\nvalue : Any\n    Value to validate.\n\nReturns\n-------\nbool\n    True if value is a valid continuous number."
            },
            {
              "name": "is_within_bounds",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "margin",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Check if value is within bounds with optional margin.\n\nParameters\n----------\nvalue : float\n    Value to check.\nmargin : float, optional\n    Safety margin (default: 0).\n\nReturns\n-------\nbool\n    True if value is within bounds ± margin."
            },
            {
              "name": "distance_to_boundary",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "tuple[float | None, float | None]",
              "decorators": [],
              "is_async": false,
              "docstring": "Calculate distance to lower and upper boundaries.\n\nParameters\n----------\nvalue : float\n    Current value.\n\nReturns\n-------\ntuple[Optional[float], Optional[float]]\n    Distance to lower bound (positive if above) and\n    distance to upper bound (positive if below)."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "DiscreteVariable",
          "bases": [
            "BaseVariable"
          ],
          "docstring": "Variable representing discrete quantities.\n\nSupports finite sets of allowed values and categorical data.\n\nExamples\n--------\n>>> from dynexus import DiscreteVariable, VariableRole\n>>>\n>>> # Create a categorical variable\n>>> status = DiscreteVariable(\n...     name=\"system_status\",\n...     description=\"System operational status\",\n...     role=VariableRole.ENDOGENOUS,\n...     allowed_values=[\"running\", \"stopped\", \"error\", \"maintenance\"],\n...     default_value=\"stopped\"\n... )\n>>>\n>>> # Valid update\n>>> status.update(\"running\", confidence=0.95)\nTrue\n>>>\n>>> # Invalid update\n>>> status.update(\"invalid_status\")\nFalse",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "name",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "description",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "units",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                },
                {
                  "name": "role",
                  "type": "normal",
                  "annotation": "VariableRole",
                  "default": null
                },
                {
                  "name": "allowed_values",
                  "type": "normal",
                  "annotation": "list[Any] | None",
                  "default": null
                },
                {
                  "name": "default_value",
                  "type": "normal",
                  "annotation": "Any | None",
                  "default": null
                },
                {
                  "name": "allow_none",
                  "type": "normal",
                  "annotation": "bool",
                  "default": null
                },
                {
                  "name": "config",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize a discrete variable.\n\nParameters\n----------\nname : str\n    Variable name.\ndescription : str, optional\n    Human-readable description.\nunits : str, optional\n    Physical units.\nrole : VariableRole, optional\n    Role in causal relationships.\nallowed_values : list[Any], optional\n    List of allowed values. If None, any value is allowed.\ndefault_value : Any, optional\n    Default value if not specified.\nallow_none : bool, optional\n    Whether None is an allowed value.\nconfig : dict, optional\n    Configuration dictionary."
            },
            {
              "name": "validate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate a discrete value.\n\nParameters\n----------\nvalue : Any\n    Value to validate.\n\nReturns\n-------\nbool\n    True if value is valid."
            },
            {
              "name": "add",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Add a value to the allowed set.\n\nParameters\n----------\nvalue : Any\n    Value to add."
            },
            {
              "name": "remove",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Remove a value from the allowed set.\n\nParameters\n----------\nvalue : Any\n    Value to remove.\n\nReturns\n-------\nbool\n    True if value was removed."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "BooleanVariable",
          "bases": [
            "DiscreteVariable"
          ],
          "docstring": "Specialized discrete variable for boolean values.\n\nExamples\n--------\n>>> from dynexus import BooleanVariable, VariableRole\n>>>\n>>> # Create a boolean variable\n>>> is_active = BooleanVariable(\n...     name=\"is_active\",\n...     description=\"Whether system is active\",\n...     role=VariableRole.ENDOGENOUS,\n...     default_value=False\n... )\n>>>\n>>> # Update with boolean value\n>>> is_active.update(True, confidence=0.99)\nTrue",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "name",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "description",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "role",
                  "type": "normal",
                  "annotation": "VariableRole",
                  "default": null
                },
                {
                  "name": "default_value",
                  "type": "normal",
                  "annotation": "bool | None",
                  "default": null
                },
                {
                  "name": "config",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize a boolean variable.\n\nParameters\n----------\nname : str\n    Variable name.\ndescription : str, optional\n    Human-readable description.\nrole : VariableRole, optional\n    Role in causal relationships.\ndefault_value : bool, optional\n    Default value if not specified.\nconfig : dict, optional\n    Configuration dictionary."
            },
            {
              "name": "toggle",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Toggle the boolean value.\n\nReturns\n-------\nbool\n    True if toggle was successful."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "VectorVariable",
          "bases": [
            "BaseVariable"
          ],
          "docstring": "Variable representing vector/multi-dimensional quantities.\n\nExamples\n--------\n>>> import numpy as np\n>>> from dynexus import VectorVariable, VariableRole\n>>>\n>>> # Create a 3D position vector\n>>> position = VectorVariable(\n...     name=\"position\",\n...     description=\"3D position vector\",\n...     units=\"meters\",\n...     role=VariableRole.ENDOGENOUS,\n...     dimension=3,\n...     default_value=np.array([0.0, 0.0, 0.0])\n... )\n>>>\n>>> # Update with vector\n>>> position.update(np.array([1.0, 2.0, 3.0]), confidence=0.9)\nTrue",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "name",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "description",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "units",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                },
                {
                  "name": "role",
                  "type": "normal",
                  "annotation": "VariableRole",
                  "default": null
                },
                {
                  "name": "dimension",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                },
                {
                  "name": "default_value",
                  "type": "normal",
                  "annotation": "np.ndarray[Any, Any] | None",
                  "default": null
                },
                {
                  "name": "norm_bound",
                  "type": "normal",
                  "annotation": "float | None",
                  "default": null
                },
                {
                  "name": "config",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize a vector variable.\n\nParameters\n----------\nname : str\n    Variable name.\ndescription : str, optional\n    Human-readable description.\nunits : str, optional\n    Physical units.\nrole : VariableRole, optional\n    Role in causal relationships.\ndimension : int, optional\n    Expected dimension. If None, any dimension is allowed.\ndefault_value : np.ndarray, optional\n    Default vector value.\nnorm_bound : float, optional\n    Maximum allowed norm of the vector.\nconfig : dict, optional\n    Configuration dictionary."
            },
            {
              "name": "_check_dimension",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "np.ndarray[Any, Any]",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Check if vector has correct dimension."
            },
            {
              "name": "_check_norm",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "np.ndarray[Any, Any]",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Check if vector norm is within bound."
            },
            {
              "name": "validate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate a vector value.\n\nParameters\n----------\nvalue : Any\n    Value to validate.\n\nReturns\n-------\nbool\n    True if value is a valid vector."
            },
            {
              "name": "get_norm",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "float | None",
              "decorators": [],
              "is_async": false,
              "docstring": "Get the norm of the current vector.\n\nReturns\n-------\nOptional[float]\n    Vector norm, or None if no value set."
            },
            {
              "name": "get_unit_vector",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "np.ndarray[Any, Any] | None",
              "decorators": [],
              "is_async": false,
              "docstring": "Get the unit vector in the current direction.\n\nReturns\n-------\nOptional[np.ndarray]\n    Unit vector, or None if no value set or zero vector."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        }
      ],
      "functions": [],
      "global_vars": [
        {
          "name": "logger",
          "annotation": null
        }
      ]
    },
    "dynexus/core/variable/state.py": {
      "path": "dynexus/core/variable/state.py",
      "module_docstring": "VariableState for managing state snapshots and transitions.",
      "imports": [
        {
          "module": "__future__",
          "name": "annotations",
          "alias": null,
          "level": 0
        },
        {
          "module": "json",
          "alias": null
        },
        {
          "module": "dataclasses",
          "name": "dataclass",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "field",
          "alias": null,
          "level": 0
        },
        {
          "module": "datetime",
          "name": "datetime",
          "alias": null,
          "level": 0
        },
        {
          "module": "datetime",
          "name": "timezone",
          "alias": null,
          "level": 0
        },
        {
          "module": "enum",
          "name": "Enum",
          "alias": null,
          "level": 0
        },
        {
          "module": "enum",
          "name": "auto",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "utils.id",
          "name": "generate_id",
          "alias": null,
          "level": 3
        },
        {
          "module": "utils.logging",
          "name": "get_logger",
          "alias": null,
          "level": 3
        }
      ],
      "classes": [
        {
          "name": "StateTransitionType",
          "bases": [
            "Enum"
          ],
          "docstring": "Enumeration of state transition types.\n\nDefines the various reasons why a state might change, which is useful for\nauditing, debugging, and understanding the provenance of state changes.\n\nAttributes\n----------\n    UPDATE: Normal value update during regular operation\n    INTERVENTION: External manual intervention or override\n    RESET: Reset to initial or default state values\n    RECONCILIATION: Conflict resolution between conflicting states\n    OBSERVATION: Update based on observed data from external sources\n    SIMULATION: Change during simulation or what-if analysis",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "StateTransition",
          "bases": [],
          "docstring": "Immutable record of a single state transition event.\n\nCaptures a complete snapshot of a state change, including what changed,\nwhy it changed, and when it changed. Useful for audit trails, undo/redo\nfunctionality, debugging, and state history analysis.\n\nAttributes\n----------\n    from_state: Dictionary representing the state before the transition\n    to_state: Dictionary representing the state after the transition\n    transition_type: Type of transition from StateTransitionType enum\n    timestamp: When the transition occurred (recommended: UTC timezone)\n    reason: Optional human-readable explanation for the transition\n    metadata: Optional additional context-specific information\n\nExamples\n--------\n>>> from datetime import datetime, timezone\n>>> from dynexus import StateTransition, StateTransitionType\n>>> transition = StateTransition(\n...     from_state={\"temp\": 20},\n...     to_state={\"temp\": 22},\n...     transition_type=StateTransitionType.UPDATE,\n...     timestamp=datetime.now(timezone.utc),\n...     reason=\"User adjusted temperature\"\n... )",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "VariableState",
          "bases": [],
          "docstring": "Represents a complete state snapshot of a variable.\n\nCaptures value, uncertainty, confidence, and metadata at a specific\npoint in time. Supports state transitions and history tracking.\n\nExamples\n--------\n>>> from dynexus import VariableState, StateTransitionType\n>>>\n>>> # Create initial state\n>>> state1 = VariableState(\n...     value=22.5,\n...     confidence=0.9,\n...     uncertainty=0.1,\n...     metadata={\"source\": \"sensor_1\"}\n... )\n>>>\n>>> # Create updated state\n>>> state2 = VariableState(\n...     value=23.0,\n...     confidence=0.85,\n...     uncertainty=0.15,\n...     metadata={\"source\": \"sensor_2\"}\n... )\n>>>\n>>> # Record transition\n>>> transition = state1.transition_to(\n...     state2,\n...     transition_type=StateTransitionType.UPDATE,\n...     reason=\"Sensor reading update\"\n... )\n>>>\n>>> print(f\"Value change: {transition.from_state['value']} → {transition.to_state['value']}\")\nValue change: 22.5 → 23.0",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                },
                {
                  "name": "confidence",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "uncertainty",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "status",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "metadata",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                },
                {
                  "name": "parent_variable_id",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                },
                {
                  "name": "parent_variable_name",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize a variable state.\n\nParameters\n----------\nvalue : Any\n    Value of the variable in this state.\nconfidence : float\n    Confidence in the value (0 to 1).\nuncertainty : float\n    Uncertainty level (0 to 1).\nstatus : str, optional\n    Status of the variable in this state.\nmetadata : dict, optional\n    Additional metadata about this state.\nparent_variable_id : str, optional\n    ID of the parent variable.\nparent_variable_name : str, optional\n    Name of the parent variable.\n\nRaises\n------\nValueError\n    If confidence or uncertainty not in [0, 1]."
            },
            {
              "name": "touch",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Update the last modified timestamp."
            },
            {
              "name": "is_compatible_to",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "other",
                  "type": "normal",
                  "annotation": "VariableState",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Check compatibility to another VariableState.\n\nParameters\n----------\nother : VariableState\n    Other state to verify compatibility with.\n\nReturns\n-------\nbool\n    True if compatible, False otherwise"
            },
            {
              "name": "transition_to",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "new_state",
                  "type": "normal",
                  "annotation": "VariableState",
                  "default": null
                },
                {
                  "name": "transition_type",
                  "type": "normal",
                  "annotation": "StateTransitionType",
                  "default": null
                },
                {
                  "name": "reason",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                },
                {
                  "name": "metadata",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                }
              ],
              "returns": "StateTransition",
              "decorators": [],
              "is_async": false,
              "docstring": "Record a transition from this state to a new state.\n\nParameters\n----------\nnew_state : VariableState\n    The new state being transitioned to.\ntransition_type : StateTransitionType\n    Type of transition.\nreason : str, optional\n    Reason for the transition.\nmetadata : dict, optional\n    Additional metadata about the transition.\n\nReturns\n-------\nStateTransition\n    Record of the transition.\n\nRaises\n------\nValueError\n    If states are not from the same variable."
            },
            {
              "name": "self_transition",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "transition_type",
                  "type": "normal",
                  "annotation": "StateTransitionType",
                  "default": null
                },
                {
                  "name": "reason",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                },
                {
                  "name": "metadata",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                }
              ],
              "returns": "StateTransition",
              "decorators": [],
              "is_async": false,
              "docstring": "Record a transition where the state remains unchanged.\n\nParameters\n----------\ntransition_type : StateTransitionType\n    Type of transition.\nreason : str, optional\n    Reason for the transition.\nmetadata : dict, optional\n    Additional metadata about the transition.\n\nReturns\n-------\nStateTransition\n    Record of the transition.\n\nNotes\n-----\nUseful for:\n- Auditing no-op operations\n- Recording status updates without value changes\n- Heartbeat/keepalive signals\n- Metadata-only updates"
            },
            {
              "name": "distance_to",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "other",
                  "type": "normal",
                  "annotation": "VariableState",
                  "default": null
                },
                {
                  "name": "metric",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [],
              "is_async": false,
              "docstring": "Calculate distance to another state.\n\nParameters\n----------\nother : VariableState\n    Other state to compare with.\nmetric : str, optional\n    Distance metric to use:\n    - \"value\": Absolute difference in values (for numeric)\n    - \"confidence\": Difference in confidence\n    - \"uncertainty\": Difference in uncertainty\n    - \"composite\": Weighted combination\n\nReturns\n-------\nfloat\n    Distance between states.\n\nRaises\n------\nValueError\n    If metric is unknown or states are incompatible."
            },
            {
              "name": "is_similar_to",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "other",
                  "type": "normal",
                  "annotation": "VariableState",
                  "default": null
                },
                {
                  "name": "tolerance",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "metric",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Check if this state is similar to another state.\n\nParameters\n----------\nother : VariableState\n    Other state to compare with.\ntolerance : float, optional\n    Maximum allowed distance (default: 0.01).\nmetric : str, optional\n    Distance metric to use.\n\nReturns\n-------\nbool\n    True if states are similar within tolerance."
            },
            {
              "name": "interpolate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "other",
                  "type": "normal",
                  "annotation": "VariableState",
                  "default": null
                },
                {
                  "name": "alpha",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "VariableState",
              "decorators": [],
              "is_async": false,
              "docstring": "Interpolate between this state and another state.\n\nParameters\n----------\nother : VariableState\n    Other state to interpolate to.\nalpha : float, optional\n    Interpolation parameter (0 = this state, 1 = other state).\n\nReturns\n-------\nVariableState\n    Interpolated state.\n\nRaises\n------\nValueError\n    If states are incompatible or alpha not in [0, 1]."
            },
            {
              "name": "to_dict",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Convert state to dictionary representation.\n\nReturns\n-------\ndict[str, Any]\n    Dictionary containing state information."
            },
            {
              "name": "to_json",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Convert state to JSON string.\n\nReturns\n-------\nstr\n    JSON representation of the state."
            },
            {
              "name": "__str__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Representation of the state."
            },
            {
              "name": "__repr__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Detailed representation of the state."
            }
          ],
          "class_methods": [
            {
              "name": "from_dict",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "data",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "VariableState",
              "decorators": [
                "classmethod"
              ],
              "is_async": false,
              "docstring": "Create a VariableState from dictionary.\n\nParameters\n----------\ndata : dict[str, Any]\n    Dictionary containing state information.\n\nReturns\n-------\nVariableState\n    Reconstructed VariableState.\n\nNotes\n-----\nThis doesn't fully restore all metadata like transitions.\nIt's meant for serialization/deserialization of state snapshots.\n\nId and created date are different from the dictionary."
            }
          ],
          "static_methods": [],
          "properties": [
            {
              "name": "id",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [
                "property"
              ],
              "is_async": false,
              "docstring": "Get the state's unique identifier."
            },
            {
              "name": "created_at",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "datetime",
              "decorators": [
                "property"
              ],
              "is_async": false,
              "docstring": "Get the creation timestamp."
            },
            {
              "name": "updated_at",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "datetime",
              "decorators": [
                "property"
              ],
              "is_async": false,
              "docstring": "Get the last update timestamp."
            }
          ]
        }
      ],
      "functions": [],
      "global_vars": [
        {
          "name": "logger",
          "annotation": null
        }
      ]
    },
    "dynexus/core/variable/random.py": {
      "path": "dynexus/core/variable/random.py",
      "module_docstring": "Reproducible random number generation.",
      "imports": [
        {
          "module": "__future__",
          "name": "annotations",
          "alias": null,
          "level": 0
        },
        {
          "module": "threading",
          "alias": null
        },
        {
          "module": "collections.abc",
          "name": "Generator",
          "alias": null,
          "level": 0
        },
        {
          "module": "contextlib",
          "name": "contextmanager",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "TypedDict",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "overload",
          "alias": null,
          "level": 0
        },
        {
          "module": "numpy",
          "alias": "np"
        },
        {
          "module": "utils.logging",
          "name": "get_logger",
          "alias": null,
          "level": 3
        },
        {
          "module": "utils.types",
          "name": "FloatArray",
          "alias": null,
          "level": 3
        },
        {
          "module": "utils.types",
          "name": "IntArray",
          "alias": null,
          "level": 3
        },
        {
          "module": "utils.types",
          "name": "Number",
          "alias": null,
          "level": 3
        },
        {
          "module": "os",
          "alias": null
        },
        {
          "module": "random",
          "alias": "py_random"
        },
        {
          "module": "time",
          "alias": null
        }
      ],
      "classes": [
        {
          "name": "RandomStateTuple",
          "bases": [
            "TypedDict"
          ],
          "docstring": "Type-safe representation of numpy RandomState internal tuple.",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "RandomState",
          "bases": [],
          "docstring": "Guaranteed reproducible random state.\n\nThis class provides deterministic random number generation with full\nstate checkpointing and restoration capabilities. It is essential for:\n\n1. **Scientific Reproducibility**: Same seed → same random sequence\n2. **Debugging**: Replay exact stochastic scenarios\n3. **Optimization Consistency**: Deterministic optimization algorithms\n4. **Parallel Execution**: Thread-safe isolated random states\n5. **Stateful Reasoning**: Variables maintain random state history\n\nThe implementation never uses numpy's global random state (`np.random`)\nto ensure complete isolation and determinism.\n\nAttributes\n----------\nlabel : str\n    Descriptive label for debugging and logging.\n_rng : np.random.RandomState\n    Isolated numpy RandomState instance.\n_seed : int | None\n    Current random seed, or None if using entropy.\n_is_deterministic : bool\n    True if seeded deterministically, False if using entropy.\n_operation_counter : int\n    Counter of random operations performed.\n_state_history : list[dict[str, Any]]\n    History of state checkpoints.\n_child_states : dict[str, RandomState]\n    Child random states derived from this one.\n_lock : threading.RLock\n    Lock for thread-safe operations.\n\nExamples\n--------\n>>> import numpy as np\n>>> from dynexus import RandomState\n>>>\n>>> # Create deterministic random state\n>>> rng = RandomState(seed=42, label=\"test\")\n>>> samples1 = rng.normal(size=5)\n>>>\n>>> # Create another with same seed - will produce identical sequence\n>>> rng2 = RandomState(seed=42, label=\"test2\")\n>>> samples2 = rng2.normal(size=5)\n>>> np.allclose(samples1, samples2)\nTrue\n>>>\n>>> # Create checkpoint for analysis\n>>> checkpoint = rng.checkpoint(\"before_intervention\")\n>>> # ... 
perform operations ...\n>>> rng.restore_checkpoint(checkpoint)  # Back to pre-intervention state\n\nNotes\n-----\nThis implementation uses numpy's RandomState which provides the Mersenne\nTwister PRNG algorithm (MT19937) with a period of 2^19937-1.",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                },
                {
                  "name": "label",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize a reproducible random state.\n\nParameters\n----------\nseed : int | None\n    Random seed for deterministic behavior. If None, uses entropy\n    from system sources (time, PID, etc.) for true randomness.\nlabel : str\n    Descriptive label for debugging and logging.\n\nRaises\n------\nValueError\n    If seed is provided but not an integer."
            },
            {
              "name": "seed",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Reset random state with new seed.\n\nResets the internal random number generator and clears operation\nhistory. After calling this method, the random state will produce\nthe same sequence as if it were freshly initialized with this seed.\n\nParameters\n----------\nseed : int\n    Random seed for deterministic behavior.\n\nRaises\n------\nValueError\n    If seed is not an integer."
            },
            {
              "name": "get_state",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Get complete current random state for checkpointing.\n\nReturns a serializable dictionary containing all information needed\nto restore the exact random state later. This includes numpy's\ninternal PRNG state, operation counters, and child states.\n\nReturns\n-------\ndict[str, Any]\n    Serializable state dictionary.\n\nExamples\n--------\n>>> from dynexus import RandomState\n>>> rng = RandomState(seed=42)\n>>> # Perform some operations\n>>> _ = rng.normal(size=10)\n>>> # Save state\n>>> state = rng.get_state()\n>>> # Restore later\n>>> rng.set_state(state)"
            },
            {
              "name": "set_state",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "state",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Restore random state from checkpoint.\n\nParameters\n----------\nstate : dict[str, Any]\n    State dictionary previously obtained from get_state().\n\nRaises\n------\nValueError\n    If state dictionary is malformed or incompatible."
            },
            {
              "name": "checkpoint",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "name",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Create a checkpoint of current state.\n\nParameters\n----------\nname : str | None\n    Optional name for the checkpoint. If None, generates automatic\n    name based on operation counter.\n\nReturns\n-------\nstr\n    Checkpoint ID that can be used to restore state.\n\nExamples\n--------\n>>> from dynexus import RandomState\n>>> rng = RandomState(seed=42)\n>>> checkpoint1 = rng.checkpoint(\"initial_state\")\n>>> _ = rng.normal(size=10)\n>>> checkpoint2 = rng.checkpoint(\"after_sampling\")\n>>> rng.restore_checkpoint(checkpoint1)  # Back to initial state\nTrue\n>>> rng.restore_checkpoint(checkpoint2)  # Back to after_sampling state\nTrue"
            },
            {
              "name": "restore_checkpoint",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "checkpoint_id",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Restore state from checkpoint ID.\n\nParameters\n----------\ncheckpoint_id : str\n    Checkpoint ID to restore.\n\nReturns\n-------\nbool\n    True if restoration was successful, False otherwise.\n\nExamples\n--------\n>>> import numpy as np\n>>> from dynexus import RandomState\n>>> rng = RandomState(seed=42, label=\"test\")\n>>> checkpoint1 = rng.checkpoint(\"state1\")\n>>> samples1 = rng.normal(size=5)\n>>> checkpoint2 = rng.checkpoint(\"state2\")\n>>> samples2 = rng.normal(size=5)\n>>>\n>>> # Restore to first checkpoint\n>>> rng.restore_checkpoint(checkpoint1)\nTrue\n>>> new_samples = rng.normal(size=5)\n>>> # new_samples will be identical to samples1\n>>> np.allclose(samples1, new_samples)\nTrue"
            },
            {
              "name": "create_child_state",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "name",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "RandomState",
              "decorators": [],
              "is_async": false,
              "docstring": "Create a child random state derived from this one.\n\nChild states are useful for hierarchical random state management,\nsuch as isolating subsystems while maintaining overall reproducibility.\n\nParameters\n----------\nname : str\n    Name for the child state.\nseed : int | None\n    Seed for child. If None, generates seed from parent's current\n    state to ensure deterministic derivation.\n\nReturns\n-------\nRandomState\n    Child random state.\n\nExamples\n--------\n>>> from dynexus import RandomState\n>>> parent = RandomState(seed=42)\n>>> child = parent.create_child_state(\"subsystem\")\n>>> # Child produces deterministic sequence based on parent's state\n>>> child_samples = child.normal(size=5)"
            },
            {
              "name": "_record_operation",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Record that a random operation has been performed."
            },
            {
              "name": "normal",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "loc",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "scale",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "size",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "FloatArray",
              "decorators": [],
              "is_async": false,
              "docstring": "Generate normally distributed random numbers.\n\nParameters\n----------\nloc : float\n    Mean of the distribution.\nscale : float\n    Standard deviation of the distribution.\nsize : int | None\n    Number of samples to generate.\n\nReturns\n-------\nFloatArray\n    Array of normally distributed samples."
            },
            {
              "name": "uniform",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "low",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "high",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "size",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "FloatArray",
              "decorators": [],
              "is_async": false,
              "docstring": "Generate uniformly distributed random numbers.\n\nParameters\n----------\nlow : float\n    Lower bound of the distribution.\nhigh : float\n    Upper bound of the distribution.\nsize : int | None\n    Number of samples to generate.\n\nReturns\n-------\nFloatArray\n    Array of uniformly distributed samples."
            },
            {
              "name": "randint",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "low",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                },
                {
                  "name": "high",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                },
                {
                  "name": "size",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "IntArray",
              "decorators": [],
              "is_async": false,
              "docstring": "Generate random integers.\n\nParameters\n----------\nlow : int\n    Lower bound (inclusive).\nhigh : int | None\n    Upper bound (exclusive). If None, low is used as upper bound\n    and 0 as lower bound.\nsize : int | None\n    Number of samples to generate.\n\nReturns\n-------\nIntArray\n    Array of random integers."
            },
            {
              "name": "choice",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "a",
                  "type": "normal",
                  "annotation": "int | FloatArray | IntArray",
                  "default": null
                },
                {
                  "name": "size",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                },
                {
                  "name": "replace",
                  "type": "normal",
                  "annotation": "bool",
                  "default": null
                },
                {
                  "name": "p",
                  "type": "normal",
                  "annotation": "FloatArray | None",
                  "default": null
                }
              ],
              "returns": "FloatArray | IntArray",
              "decorators": [],
              "is_async": false,
              "docstring": "Generate random samples from a given array.\n\nParameters\n----------\na : int | FloatArray | IntArray\n    If an int, random sample from np.arange(a).\nsize : int | None\n    Number of samples to generate.\nreplace : bool\n    Whether to sample with replacement.\np : FloatArray | None\n    Probabilities associated with each entry in a.\n\nReturns\n-------\nFloatArray | IntArray\n    Random samples from a."
            },
            {
              "name": "shuffle",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "x",
                  "type": "normal",
                  "annotation": "FloatArray | IntArray | list[float] | list[int]",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Shuffle array in-place.\n\nParameters\n----------\nx : FloatArray | IntArray | list[float] | list[int]\n    Array to shuffle."
            },
            {
              "name": "random",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [
                "overload"
              ],
              "is_async": false,
              "docstring": null
            },
            {
              "name": "random",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "size",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "FloatArray",
              "decorators": [
                "overload"
              ],
              "is_async": false,
              "docstring": null
            },
            {
              "name": "random",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "size",
                  "type": "normal",
                  "annotation": "tuple[int, ...]",
                  "default": null
                }
              ],
              "returns": "FloatArray",
              "decorators": [
                "overload"
              ],
              "is_async": false,
              "docstring": null
            },
            {
              "name": "random",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "size",
                  "type": "normal",
                  "annotation": "int | tuple[int, ...] | None",
                  "default": null
                }
              ],
              "returns": "float | FloatArray",
              "decorators": [],
              "is_async": false,
              "docstring": "Generate uniform random numbers in [0, 1).\n\nGenerates random numbers from a uniform distribution over the\nsemi-open interval [0, 1). This is a fundamental operation used\nfor generating samples from arbitrary distributions via inverse\ntransform sampling.\n\nParameters\n----------\nsize : int | tuple[int, ...] | None\n    Number of samples to generate. Can be:\n    - None: returns a single float\n    - int: returns 1D array of length `size`\n    - tuple: returns array with shape `size`\n\nReturns\n-------\nfloat | FloatArray\n    Single random float if size is None, otherwise numpy array\n    of random floats with specified shape.\n\nRaises\n------\nValueError\n    If size contains negative values or non-integer elements.\nTypeError\n    If size is not None, int, or tuple.\n\nExamples\n--------\n>>> from dynexus import RandomState\n>>> rng = RandomState(seed=42)\n>>>\n>>> # Single random number\n>>> x = rng.random()\n>>> print(f\"Single value: {x:.6f}\")\nSingle value: 0.374540\n>>> isinstance(x, float)\nTrue\n>>>\n>>> # 1D array\n>>> arr1d = rng.random(5)\n>>> print(f\"1D array shape: {arr1d.shape}\")\n1D array shape: (5,)\n>>> print(f\"Values: {arr1d}\")\nValues: [0.95071431 0.73199394 0.59865848 0.15601864 0.15599452]\n>>>\n>>> # 2D array\n>>> arr2d = rng.random((2, 3))\n>>> print(f\"2D array shape: {arr2d.shape}\")\n2D array shape: (2, 3)\n>>> print(f\"Values: {arr2d}\")\nValues: [[0.05808361 0.86617615 0.60111501]\n [0.70807258 0.02058449 0.96990985]]\n\nNotes\n-----\nThe uniform distribution over [0, 1) is:\n- P(x) = 1 for 0 ≤ x < 1\n- P(x) = 0 otherwise\n\nThis function uses numpy's Mersenne Twister implementation (MT19937)\nwhich has a period of 2^19937-1 and 623-dimensional equidistribution.\n\nFor generating random integers in a range, use randint().\nFor generating random numbers from other distributions, use the\nappropriate methods (normal(), uniform(), etc.).\n\nThread Safety:\nThis method is thread-safe due to internal 
locking. Multiple threads\ncan call random() concurrently without corrupting the random state."
            },
            {
              "name": "bytes",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "length",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "bytes",
              "decorators": [],
              "is_async": false,
              "docstring": "Generate random bytes.\n\nParameters\n----------\nlength : int\n    Number of bytes to generate.\n\nReturns\n-------\nbytes\n    Random bytes."
            },
            {
              "name": "is_deterministic",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Check if this random state is deterministic.\n\nReturns\n-------\nbool\n    True if seeded deterministically, False if using entropy."
            },
            {
              "name": "get_info",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Get information about this random state.\n\nReturns\n-------\ndict[str, Any]\n    Information dictionary containing label, seed, operation count,\n    and other metadata."
            },
            {
              "name": "__str__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Representation."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "RandomStateManager",
          "bases": [],
          "docstring": "Manages random states with thread safety.\n\nThis singleton class provides centralized management of random states\nfor the entire framework. It maintains:\n\n1. **Global Deterministic State**: For reproducible simulations\n2. **Global Non-deterministic State**: For true randomness when needed\n3. **Thread-Local States**: For parallel execution with consistency\n4. **Context Management**: For isolated random operations\n\nThe manager ensures that all stochastic operations are\nproperly isolated and reproducible, which is essential for\nreasoning and scientific validation.\n\nAttributes\n----------\n_deterministic_state : RandomState\n    Global deterministic random state.\n_nondeterministic_state : RandomState\n    Global non-deterministic random state.\n_thread_local : threading.local\n    Thread-local storage for isolated random states.\n\nExamples\n--------\n>>> from dynexus import RandomStateManager\n>>>\n>>> # Get global deterministic state\n>>> manager = RandomStateManager()\n>>> rng = manager.get_deterministic_state()\n>>> samples = rng.normal(size=10)\n>>>\n>>> # Set global seed for reproducibility\n>>> manager.set_seed(42)\n>>>\n>>> # Create isolated context for intervention\n>>> with manager.isolated_context(seed=123) as isolated_rng:\n...     intervention_samples = isolated_rng.normal(size=5)",
          "methods": [
            {
              "name": "__new__",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "RandomStateManager",
              "decorators": [],
              "is_async": false,
              "docstring": "Create or return the singleton instance of RandomStateManager.\n\nThis method implements the singleton pattern to ensure only one instance\nof the random state manager exists in the entire framework.\nThe singleton pattern is essential for:\n\n1. **Global Consistency**: All parts of the system access the\n    same random states\n2. **Resource Management**: Single point of control for random state\n    allocation\n3. **Thread Safety**: Coordinated access across multiple threads\n4. **State Persistence**: Maintain random state history across the\n    application\n\nThe implementation uses double-checked locking with a reentrant lock to\nensure thread safety during singleton initialization.\n\nReturns\n-------\nRandomStateManager\n    The singleton instance of the random state manager.\n\nNotes\n-----\n- This is a class method but does not use the @classmethod decorator\n  because __new__ is a special method that is implicitly a class method.\n- The lock (`cls._lock`) is a class-level threading.RLock that ensures\n  thread safety during singleton creation.\n- The `_initialize()` method is called only once, when the singleton\n  is first created.\n\nThread Safety\n-------------\nThe implementation is thread-safe:\n1. The class-level lock prevents race conditions during initialization\n2. Double-checked locking pattern minimizes lock contention\n3. The instance check is performed inside the lock to prevent\n   the \"check-then-act\" race condition\n\nExamples\n--------\n>>> from dynexus import RandomStateManager\n>>>\n>>> # First call creates the singleton\n>>> manager1 = RandomStateManager()\n>>>\n>>> # Subsequent calls return the same instance\n>>> manager2 = RandomStateManager()\n>>> manager1 is manager2\nTrue\n>>>\n>>> # Thread-safe access from multiple threads\n>>> import threading\n>>> def get_manager():\n...     
return RandomStateManager()\n>>>\n>>> threads = [threading.Thread(target=get_manager) for _ in range(10)]\n>>> for thread in threads:\n...     thread.start()\n>>> for thread in threads:\n...     thread.join()\n>>> # All threads received the same singleton instance\n\nSee Also\n--------\nthreading.RLock : The reentrant lock used for thread safety.\nsuper().__new__ : The parent class's __new__ method for actual instance creation."
            },
            {
              "name": "_initialize",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize the global random state manager."
            },
            {
              "name": "get_deterministic_state",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "RandomState",
              "decorators": [],
              "is_async": false,
              "docstring": "Get the global deterministic random state.\n\nThis state should be used for all operations requiring reproducibility,\nsuch as debugging, testing, and scientific validation.\n\nReturns\n-------\nRandomState\n    Global deterministic random state."
            },
            {
              "name": "get_nondeterministic_state",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "RandomState",
              "decorators": [],
              "is_async": false,
              "docstring": "Get the global non-deterministic random state.\n\nThis state should be used when true randomness is required,\nsuch as Monte Carlo simulations or stochastic optimization\nwhere exploration is desired.\n\nReturns\n-------\nRandomState\n    Global non-deterministic random state."
            },
            {
              "name": "get_thread_local_state",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "RandomState",
              "decorators": [],
              "is_async": false,
              "docstring": "Get thread-local random state.\n\nEssential for parallel execution where each thread needs its own\nisolated but reproducible random state. This method ensures that:\n\n1. Each thread gets its own isolated random state\n2. Random states are reproducible when seeds are provided\n3. Thread states are lazily initialized on first access\n4. The global deterministic state is used for seed generation when needed\n\nParameters\n----------\nseed : int | None\n    Seed for the thread-local state. If None, generates a deterministic\n    seed from thread ID and global state to ensure reproducibility\n    across runs while maintaining thread isolation.\n\n    Note: If seed is provided, it should be unique per thread to avoid\n    duplicate random sequences.\n\nReturns\n-------\nRandomState\n    Thread-local random state instance.\n\nRaises\n------\nValueError\n    If seed is provided but not an integer.\nRuntimeError\n    If thread-local storage cannot be initialized.\n\nExamples\n--------\n>>> from dynexus import RandomStateManager\n>>> manager = RandomStateManager()\n>>>\n>>> # Get thread-local state with automatic seeding\n>>> thread_rng = manager.get_thread_local_state()\n>>> samples = thread_rng.normal(size=5)\n>>>\n>>> # Get with explicit seed for reproducibility\n>>> thread_rng2 = manager.get_thread_local_state(seed=42)\n>>> samples2 = thread_rng2.normal(size=5)\n>>>\n>>> # In parallel execution:\n>>> import concurrent.futures\n>>> def process_data(thread_id: int):\n...     rng = manager.get_thread_local_state(seed=100 + thread_id)\n...     return rng.normal(size=3)\n>>>\n>>> with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:\n...     results = list(executor.map(process_data, range(3)))\n...     
# Each thread has its own reproducible random state\n\nNotes\n-----\n- Thread-local states are stored in `threading.local()` storage,\n  which ensures each thread sees only its own instances.\n- When seed is None, the generated seed combines:\n  - A random sample from the global deterministic state\n  - The thread ID for uniqueness\n  - Modulo operation to ensure it's within valid seed range\n- Thread-local states persist until the thread terminates or\n  the thread-local storage is cleared.\n- For maximum reproducibility in parallel simulations, provide\n  explicit seeds to each thread."
            },
            {
              "name": "set_seed",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Set global deterministic seed.\n\nThis method resets the global deterministic random state with the\ngiven seed. All future operations using the deterministic state\nwill be reproducible.\n\nParameters\n----------\nseed : int\n    Random seed for global deterministic state.\n\nRaises\n------\nValueError\n    If seed is not an integer."
            },
            {
              "name": "isolated_context",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                },
                {
                  "name": "label",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "Generator[RandomState, None, None]",
              "decorators": [
                "contextmanager"
              ],
              "is_async": false,
              "docstring": "Context manager for isolated random operations.\n\nCreates a completely isolated random state for operations within\nthe context. This is useful for testing interventions, comparing\nscenarios, or isolating subsystems.\n\nParameters\n----------\nseed : int | None\n    Seed for the isolated context. If None, uses entropy.\nlabel : str\n    Label for the context.\n\nYields\n------\nRandomState\n    Isolated random state for use within the context.\n\nExamples\n--------\n>>> from dynexus import RandomStateManager\n>>> manager = RandomStateManager()\n>>> with manager.isolated_context(seed=42) as rng:\n...     samples = rng.normal(size=10)\n...     # These samples are completely isolated from global state"
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        }
      ],
      "functions": [
        {
          "name": "_get_rng_state_tuple",
          "args": [
            {
              "name": "rng",
              "type": "normal",
              "annotation": "np.random.RandomState",
              "default": null
            }
          ],
          "returns": "RandomStateTuple",
          "decorators": [],
          "is_async": false,
          "docstring": "Get random state as typed dictionary.\n\nParameters\n----------\nrng : np.random.RandomState\n    Numpy RandomState instance.\n\nReturns\n-------\nRandomStateTuple\n    Random state as typed dictionary.\n\nRaises\n------\nTypeError\n    If get_state() doesn't return expected tuple format."
        },
        {
          "name": "_create_numpy_state_tuple",
          "args": [
            {
              "name": "rng_state",
              "type": "normal",
              "annotation": "RandomStateTuple",
              "default": null
            }
          ],
          "returns": "tuple[str, np.ndarray, int, int, float]",
          "decorators": [],
          "is_async": false,
          "docstring": "Create numpy random state tuple from typed dictionary.\n\nParameters\n----------\nrng_state : RandomStateTuple\n    Random state as typed dictionary.\n\nReturns\n-------\ntuple[str, np.ndarray, int, int, float]\n    Random state tuple ready for set_state()."
        },
        {
          "name": "set_random_seed",
          "args": [
            {
              "name": "seed",
              "type": "normal",
              "annotation": "int",
              "default": null
            }
          ],
          "returns": "None",
          "decorators": [],
          "is_async": false,
          "docstring": "Set global random seed for all stochastic operations.\n\nThis ensures reproducibility across the entire framework.\n\nParameters\n----------\nseed : int\n    Random seed to use.\n\nRaises\n------\nValueError\n    If seed is not a positive integer or zero.\n\nExamples\n--------\n>>> from dynexus import set_random_seed\n>>> set_random_seed(42)\n>>> # All subsequent stochastic operations will be reproducible"
        },
        {
          "name": "sampling_context",
          "args": [
            {
              "name": "seed",
              "type": "normal",
              "annotation": "int",
              "default": null
            },
            {
              "name": "name",
              "type": "normal",
              "annotation": "str",
              "default": null
            }
          ],
          "returns": "Generator[RandomState, None, None]",
          "decorators": [
            "contextmanager"
          ],
          "is_async": false,
          "docstring": "Create a context for reproducible sampling operations.\n\nUseful for:\n- Testing interventions\n- Comparing optimization strategies\n- Parallel simulation with deterministic results\n\nParameters\n----------\nseed : int\n    Random seed for the context.\nname : str\n    Context name for debugging.\n\nYields\n------\nRandomState\n    Random state for use within the context.\n\nRaises\n------\nValueError\n    If seed is not a positive integer or zero.\n\nExamples\n--------\n>>> from dynexus import sampling_context\n>>> with sampling_context(seed=42, name=\"test_intervention\") as rng:\n...     # All sampling in this block uses isolated, reproducible random state\n...     samples = rng.normal(size=100)"
        },
        {
          "name": "random_state_manager",
          "args": [],
          "returns": "RandomStateManager",
          "decorators": [],
          "is_async": false,
          "docstring": "Get the global random state manager.\n\nReturns\n-------\nRandomStateManager\n    Global random state manager instance.\n\nExamples\n--------\n>>> from dynexus import random_state_manager\n>>> manager = random_state_manager()\n>>> rng = manager.get_deterministic_state()"
        }
      ],
      "global_vars": [
        {
          "name": "logger",
          "annotation": null
        }
      ]
    },
    "dynexus/core/variable/stochastic.py": {
      "path": "dynexus/core/variable/stochastic.py",
      "module_docstring": "Stochastic Variable implementations for probabilistic reasoning.",
      "imports": [
        {
          "module": "__future__",
          "name": "annotations",
          "alias": null,
          "level": 0
        },
        {
          "module": "math",
          "alias": null
        },
        {
          "module": "abc",
          "name": "ABC",
          "alias": null,
          "level": 0
        },
        {
          "module": "abc",
          "name": "abstractmethod",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "dataclass",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "field",
          "alias": null,
          "level": 0
        },
        {
          "module": "enum",
          "name": "Enum",
          "alias": null,
          "level": 0
        },
        {
          "module": "enum",
          "name": "auto",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "TypeAlias",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "overload",
          "alias": null,
          "level": 0
        },
        {
          "module": "numpy",
          "alias": "np"
        },
        {
          "module": "scipy",
          "name": "stats",
          "alias": null,
          "level": 0
        },
        {
          "module": "scipy.stats._distn_infrastructure",
          "name": "rv_continuous_frozen",
          "alias": null,
          "level": 0
        },
        {
          "module": "scipy.stats._distn_infrastructure",
          "name": "rv_discrete_frozen",
          "alias": null,
          "level": 0
        },
        {
          "module": "utils.logging",
          "name": "get_logger",
          "alias": null,
          "level": 3
        },
        {
          "module": "utils.types",
          "name": "FloatArray",
          "alias": null,
          "level": 3
        },
        {
          "module": "utils.types",
          "name": "IntArray",
          "alias": null,
          "level": 3
        },
        {
          "module": "utils.types",
          "name": "Number",
          "alias": null,
          "level": 3
        },
        {
          "module": "base",
          "name": "BaseVariable",
          "alias": null,
          "level": 1
        },
        {
          "module": "base",
          "name": "VariableRole",
          "alias": null,
          "level": 1
        },
        {
          "module": "random",
          "name": "RandomState",
          "alias": null,
          "level": 1
        },
        {
          "module": "random",
          "name": "RandomStateManager",
          "alias": null,
          "level": 1
        }
      ],
      "classes": [
        {
          "name": "DistributionType",
          "bases": [
            "Enum"
          ],
          "docstring": "Enumeration of probability distribution types.\n\nDefines the statistical distributions that can be used to model stochastic\nvariables' uncertainty. Each distribution type represents a family of\nprobability distributions characterized by specific parameters and properties.\n\nAttributes\n----------\nNORMAL : DistributionType\n    Normal (Gaussian) distribution. Characterized by mean (mu) and standard\n    deviation (sigma). Symmetric, bell-shaped curve. Suitable for modeling\n    measurement errors, natural variations, and quantities affected by\n    many small independent factors.\n\nUNIFORM : DistributionType\n    Uniform distribution. Characterized by lower and upper bounds.\n    Constant probability density between bounds, zero elsewhere. Suitable\n    for modeling complete uncertainty within known bounds or random\n    selection from a finite interval.\n\nEXPONENTIAL : DistributionType\n    Exponential distribution. Characterized by rate (lambda)\n    or scale (beta = 1/lambda).\n    Models time between events in Poisson processes. Memoryless property.\n    Suitable for waiting times, failure times, and inter-arrival times.\n\nBETA : DistributionType\n    Beta distribution. Characterized by shape parameters alpha and beta.\n    Defined on interval [0, 1]. Flexible shape for modeling probabilities\n    and proportions. Conjugate prior for Bernoulli and binomial distributions.\n\nGAMMA : DistributionType\n    Gamma distribution. Characterized by shape (k) and scale (theta) parameters.\n    Generalization of exponential and chi-squared distributions. Suitable\n    for waiting times, rainfall amounts, and insurance claim modeling.\n\nPOISSON : DistributionType\n    Poisson distribution. Characterized by rate parameter lambda.\n    Discrete distribution for counting events in fixed interval.\n    Suitable for rare event counts, arrival processes, and defect counts.\n\nBERNOULLI : DistributionType\n    Bernoulli distribution. 
Characterized by success probability p.\n    Binary outcome distribution (0 or 1). Fundamental building block for\n    binary classification and success/failure modeling.\n\nCATEGORICAL : DistributionType\n    Categorical (multinoulli) distribution. Characterized by probability\n    vector over finite set of categories. Generalization of Bernoulli\n    to multiple discrete outcomes.\n\nMIXTURE : DistributionType\n    Mixture distribution. Weighted combination of other distributions.\n    Flexible modeling of multi-modal data and heterogeneous populations.\n\nEMPIRICAL : DistributionType\n    Empirical distribution. Derived directly from observed data.\n    Non-parametric representation using observed samples or histogram.\n\nNotes\n-----\nDistribution types are used in conjunction with DistributionParameters\nto fully specify a probability distribution. Each type requires specific\nparameters as defined by standard statistical notation.\n\nFor Bayesian inference, certain distributions serve as conjugate priors:\n- Beta is conjugate prior for Bernoulli/Binomial\n- Gamma is conjugate prior for Poisson\n- Normal is conjugate prior for Normal with known variance\n\nExamples\n--------\n>>> from dynexus import DistributionType\n>>>\n>>> # Common distributions for different modeling scenarios\n>>> measurement_error = DistributionType.NORMAL\n>>> unknown_bounded = DistributionType.UNIFORM\n>>> waiting_time = DistributionType.EXPONENTIAL\n>>> probability_estimate = DistributionType.BETA\n>>> count_data = DistributionType.POISSON\n>>> binary_outcome = DistributionType.BERNOULLI\n>>>\n>>> # Check if a distribution is continuous\n>>> continuous_dists = {\n...     DistributionType.NORMAL,\n...     DistributionType.UNIFORM,\n...     DistributionType.EXPONENTIAL,\n...     DistributionType.BETA,\n...     DistributionType.GAMMA\n... }\n>>>\n>>> # Check if a distribution is discrete\n>>> discrete_dists = {\n...     DistributionType.POISSON,\n...     DistributionType.BERNOULLI,\n...  
   DistributionType.CATEGORICAL\n... }",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "DistributionParameters",
          "bases": [],
          "docstring": "Container for probability distribution parameters.\n\nEncapsulates both the distribution type and its specific parameter values\nin a structured format. Provides factory methods for common distributions\nto ensure correct parameter naming and validation.\n\nParameters\n----------\ndistribution_type : DistributionType\n    Type of probability distribution (e.g., NORMAL, UNIFORM, EXPONENTIAL).\n\nparameters : dict[str, Any]\n    Dictionary of parameter names to values. Must match the expected\n    parameters for the specified distribution type.\n    - NORMAL: {\"mean\": float, \"std\": float}\n    - UNIFORM: {\"low\": float, \"high\": float}\n    - EXPONENTIAL: {\"scale\": float} or {\"rate\": float}\n    - BETA: {\"alpha\": float, \"beta\": float}\n    - GAMMA: {\"shape\": float, \"scale\": float} or {\"k\": float, \"theta\": float}\n    - POISSON: {\"lambda\": float} or {\"rate\": float}\n    - BERNOULLI: {\"p\": float}\n    - CATEGORICAL: {\"probabilities\": list[float]}\n    - MIXTURE: {\"weights\": list[float], \"components\": list[DistributionParameters]}\n    - EMPIRICAL: {\"samples\": list[float]} or {\"histogram\": dict}\n\nrandom_seed : int | None\n    Random seed for reproducible sampling from this distribution.\n    If None, uses the global deterministic random state.\n\nAttributes\n----------\ndistribution_type : DistributionType\n    See Parameters section.\n\nparameters : dict[str, Any]\n    See Parameters section.\n\nrandom_seed : int | None\n    See Parameters section.\n\nRaises\n------\nValueError\n    If parameters dictionary does not contain required keys for the\n    specified distribution type, or if parameter values are invalid\n    (e.g., negative standard deviation, low > high for uniform).\n\nNotes\n-----\nParameter names follow standard statistical conventions where possible.\nSome distributions have multiple parameterizations (e.g., exponential\ncan use rate lambda or scale beta = 1/lambda). 
The factory methods enforce\nconsistent parameter naming.\n\nFor mixture distributions, the weights should sum to 1. For categorical\ndistributions, probabilities should sum to 1.\n\nThe random_seed parameter ensures reproducible sampling. When specified,\nall samples from distributions using these parameters will be deterministic.\n\nExamples\n--------\n>>> from dynexus import DistributionParameters, DistributionType\n>>>\n>>> # Using factory methods with reproducibility\n>>> normal_params = DistributionParameters.normal(mean=0.0, std=1.0, seed=42)\n>>> uniform_params = DistributionParameters.uniform(low=0.0, high=10.0, seed=42)\n>>> exp_params = DistributionParameters.exponential(scale=2.0, seed=42)\n>>> beta_params = DistributionParameters.beta(alpha=2.0, beta=5.0, seed=42)\n>>>\n>>> # Direct instantiation\n>>> gamma_params = DistributionParameters(\n...     distribution_type=DistributionType.GAMMA,\n...     parameters={\"shape\": 3.0, \"scale\": 2.0},\n...     random_seed=42\n... )\n>>>\n>>> # Accessing parameters\n>>> print(f\"Normal mean: {normal_params.parameters['mean']}\")\nNormal mean: 0.0\n>>> print(f\"Random seed: {normal_params.random_seed}\")\nRandom seed: 42",
          "methods": [
            {
              "name": "validate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate that parameters are appropriate for the distribution type.\n\nReturns\n-------\nbool\n    True if parameters are valid for the specified distribution type.\n\nRaises\n------\nValueError\n    If required parameters are missing or have invalid values.\n\nExamples\n--------\n>>> from dynexus import DistributionParameters\n>>> params = DistributionParameters.normal(mean=0.0, std=1.0)\n>>> params.validate()  # Should pass\nTrue"
            },
            {
              "name": "get_random_state",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "RandomState",
              "decorators": [],
              "is_async": false,
              "docstring": "Get a reproducible random state for this distribution.\n\nReturns\n-------\nRandomState\n    Random state initialized with this distribution's seed.\n    If no seed was specified, returns the global deterministic state.\n\nExamples\n--------\n>>> from dynexus import DistributionParameters\n>>> params = DistributionParameters.normal(mean=0, std=1, seed=42)\n>>> rng = params.get_random_state()\n>>> samples = rng.normal(size=10)  # Reproducible sampling"
            },
            {
              "name": "__str__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Representation of distribution parameters."
            },
            {
              "name": "__repr__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Detailed representation of distribution parameters."
            }
          ],
          "class_methods": [
            {
              "name": "normal",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "mean",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "std",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "DistributionParameters",
              "decorators": [
                "classmethod"
              ],
              "is_async": false,
              "docstring": "Create parameters for a Gaussian distribution.\n\nParameters\n----------\nmean : float\n    Mean (mu) of the normal distribution. Represents the central\n    tendency or expected value.\nstd : float\n    Standard deviation (sigma) of the normal distribution. Must be positive.\n    Represents the spread or uncertainty. Variance = sigma^2.\nseed : int | None\n    Random seed for reproducible sampling. If None, uses global state.\n\nReturns\n-------\nDistributionParameters\n    Parameters for N(mu, sigma^2) distribution.\n\nRaises\n------\nValueError\n    If std is not positive.\n\nExamples\n--------\n>>> from dynexus import DistributionParameters\n>>> params = DistributionParameters.normal(mean=10.0, std=2.0, seed=42)\n>>> print(params.distribution_type)\nDistributionType.NORMAL\n>>> print(params.parameters[\"mean\"])\n10.0\n>>> print(params.random_seed)\n42"
            },
            {
              "name": "uniform",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "low",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "high",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "DistributionParameters",
              "decorators": [
                "classmethod"
              ],
              "is_async": false,
              "docstring": "Create parameters for a uniform distribution.\n\nParameters\n----------\nlow : float\n    Lower bound of the uniform distribution (inclusive).\nhigh : float\n    Upper bound of the uniform distribution (exclusive for continuous,\n    inclusive definitions vary). Must be greater than low.\nseed : int | None\n    Random seed for reproducible sampling.\n\nReturns\n-------\nDistributionParameters\n    Parameters for U(low, high) distribution.\n\nRaises\n------\nValueError\n    If high <= low.\n\nExamples\n--------\n>>> from dynexus import DistributionParameters\n>>> params = DistributionParameters.uniform(low=0.0, high=1.0, seed=42)\n>>> print(params.parameters[\"low\"])\n0.0\n>>> print(params.parameters[\"high\"])\n1.0"
            },
            {
              "name": "exponential",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "scale",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "DistributionParameters",
              "decorators": [
                "classmethod"
              ],
              "is_async": false,
              "docstring": "Create parameters for an exponential distribution.\n\nUses scale parameterization: f(x; beta) = (1/beta)exp(-x/beta) for x ≥ 0.\nAlternative rate parameterization: lambda = 1/beta.\n\nParameters\n----------\nscale : float\n    Scale parameter (beta). Must be positive. Mean = beta, variance = beta^2.\n    Equivalent to 1/sigma where sigma is the rate parameter.\nseed : int | None\n    Random seed for reproducible sampling.\n\nReturns\n-------\nDistributionParameters\n    Parameters for Exp(beta) distribution.\n\nRaises\n------\nValueError\n    If scale is not positive.\n\nExamples\n--------\n>>> from dynexus import DistributionParameters\n>>> params = DistributionParameters.exponential(scale=5.0, seed=42)\n>>> print(params.parameters[\"scale\"])\n5.0"
            },
            {
              "name": "beta",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "alpha",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "beta",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "DistributionParameters",
              "decorators": [
                "classmethod"
              ],
              "is_async": false,
              "docstring": "Create parameters for a beta distribution.\n\nParameters\n----------\nalpha : float\n    First shape parameter (alpha). Must be positive. Often interpreted as\n    \"number of successes + 1\" in Bayesian context.\nbeta : float\n    Second shape parameter (beta). Must be positive. Often interpreted as\n    \"number of failures + 1\" in Bayesian context.\nseed : int | None\n    Random seed for reproducible sampling.\n\nReturns\n-------\nDistributionParameters\n    Parameters for Beta(alpha, beta) distribution.\n\nRaises\n------\nValueError\n    If alpha or beta is not positive.\n\nNotes\n-----\nThe beta distribution is defined on [0, 1]. Mean = alpha/(alpha+beta).\nUseful as conjugate prior for Bernoulli and binomial distributions.\n\nExamples\n--------\n>>> from dynexus import DistributionParameters\n>>> params = DistributionParameters.beta(alpha=2.0, beta=5.0, seed=42)\n>>> print(params.parameters[\"alpha\"])\n2.0"
            },
            {
              "name": "gamma",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "k",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "theta",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "DistributionParameters",
              "decorators": [
                "classmethod"
              ],
              "is_async": false,
              "docstring": "Create parameters for a gamma distribution.\n\nParameters\n----------\nk : float\n    Shape parameter of the distribution.\ntheta : float\n    Scale parameter of the distribution.\nseed : int | None\n    Random seed for reproducible sampling.\n\nReturns\n-------\nDistributionParameters\n    Parameters for Gamma(k, theta) distribution.\n\nRaises\n------\nValueError\n    If k or theta is not positive.\n\nExamples\n--------\n>>> from dynexus import DistributionParameters\n>>> params = DistributionParameters.gamma(k=1.0, theta=1.0, seed=42)\n>>> params.parameters['k']\n1.0"
            },
            {
              "name": "poisson",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "lam",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "DistributionParameters",
              "decorators": [
                "classmethod"
              ],
              "is_async": false,
              "docstring": "Create parameters for a poisson distribution.\n\nParameters\n----------\nlam : float\n    Rate parameter of the distribution.\nseed : int | None\n    Random seed for reproducible sampling.\n\nReturns\n-------\nDistributionParameters\n    Parameters for Pois(lambda) distribution.\n\nRaises\n------\nValueError\n    If lambda is not positive.\n\nExamples\n--------\n>>> from dynexus import DistributionParameters\n>>> params = DistributionParameters.poisson(lam=10.0, seed=42)\n>>> print(params.distribution_type)\nDistributionType.POISSON"
            },
            {
              "name": "bernoulli",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "p",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "DistributionParameters",
              "decorators": [
                "classmethod"
              ],
              "is_async": false,
              "docstring": "Create parameters for a bernoulli distribution.\n\nParameters\n----------\np : float\n    Success probability of the distribution.\nseed : int | None\n    Random seed for reproducible sampling.\n\nReturns\n-------\nDistributionParameters\n    Parameters for Bern(p) distribution.\n\nRaises\n------\nValueError\n    If p is not between 0 and 1.\n\nExamples\n--------\n>>> from dynexus import DistributionParameters\n>>> params = DistributionParameters.bernoulli(p=0.3, seed=42)\n>>> print(params.distribution_type, params.random_seed)\nDistributionType.BERNOULLI 42"
            },
            {
              "name": "categorical",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "probabilities",
                  "type": "normal",
                  "annotation": "list[float]",
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "DistributionParameters",
              "decorators": [
                "classmethod"
              ],
              "is_async": false,
              "docstring": "Create parameters for a categorical distribution.\n\nParameters\n----------\nprobabilities : list[float]\n    List of the robabilities of the distribution.\nseed : int | None\n    Random seed for reproducible sampling.\n\nReturns\n-------\nDistributionParameters\n    Parameters for Cat(p_1, ..., p_k) distribution.\n\nRaises\n------\nValueError\n    If p_i is not between 0 and 1 and sum(p_i) is not 1.\n\nExamples\n--------\n>>> from dynexus import DistributionParameters\n>>> params = DistributionParameters.categorical(\n...     probabilities=[0.3, 0.2, 0.5], seed=42\n... )\n>>> print(params.parameters['probabilities'])\n[0.3, 0.2, 0.5]"
            },
            {
              "name": "mixture",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "weights",
                  "type": "normal",
                  "annotation": "list[float]",
                  "default": null
                },
                {
                  "name": "components",
                  "type": "normal",
                  "annotation": "list[DistributionParameters]",
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "DistributionParameters",
              "decorators": [
                "classmethod"
              ],
              "is_async": false,
              "docstring": "Create parameters for a categorical distribution.\n\nParameters\n----------\nprobabilities : list[float]\n    List of the robabilities of the distribution.\nseed : int | None\n    Random seed for reproducible sampling.\n\nReturns\n-------\nDistributionParameters\n    Parameters for mixture distribution.\n\nRaises\n------\nValueError\n    If p_i is not between 0 and 1 and sum(p_i) is not 1.\n\nExamples\n--------\n>>> from dynexus import DistributionParameters\n>>> params1 = DistributionParameters.normal(mean=5.0, std=2.0, seed=42)\n>>> params2 = DistributionParameters.normal(mean=15.0, std=1.0, seed=42)\n>>> params = DistributionParameters.mixture(\n...     weights=[0.4, 0.6], components=[params1, params2], seed=42\n... )\n>>> print(params.parameters['components'][1])\nNORMAL(mean=15.0, std=1.0, seed=42)"
            },
            {
              "name": "empirical",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "samples",
                  "type": "normal",
                  "annotation": "list[float]",
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "DistributionParameters",
              "decorators": [
                "classmethod"
              ],
              "is_async": false,
              "docstring": "Create parameters for an empirical distribution.\n\nParameters\n----------\nsamples : list[float]\n    List of the samples of the distribution.\nseed : int | None\n    Random seed for reproducible sampling.\n\nReturns\n-------\nDistributionParameters\n    Parameters for empirical distribution.\n\nRaises\n------\nValueError\n    If samples is empty.\n\nExamples\n--------\n>>> from dynexus import DistributionParameters\n>>> params = DistributionParameters.empirical(\n...     samples=[3, 5, -1, 5, 3], seed=42\n... )\n>>> print(params.parameters['samples'])\n[3, 5, -1, 5, 3]"
            }
          ],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "StochasticVariable",
          "bases": [
            "BaseVariable",
            "ABC"
          ],
          "docstring": "Base class for stochastic (probabilistic) variables.\n\nRepresents variables with inherent uncertainty modeled via probability\ndistributions. Supports sampling, density estimation, and Bayesian updates.\n\nStochastic variables are active reasoning entities that:\n1. Maintain probabilistic beliefs about their values\n2. Can resolve conflicts between competing mechanisms\n3. Can suggest optimal interventions based on their uncertainty\n4. Compute reliability metrics from their distribution properties\n5. Learn from historical data to improve their probabilistic models\n6. Guarantee reproducible sampling through isolated random states\n\nAttributes\n----------\ndistribution_params : DistributionParameters | None\n    Parameters defining the probability distribution.\nsamples : list[float]\n    History of sampled values.\n_distribution : stats.rv_continuous | stats.rv_discrete | None\n    Internal scipy distribution object.\n_random_state : RandomState\n    Isolated random state for reproducible sampling.\n_current_random_seed : int | None\n    Current random seed if deterministic.\n_random_state_checkpoints : dict[str, str]\n    Mapping of checkpoint names to checkpoint IDs.\n\nExamples\n--------\n>>> import numpy as np\n>>> from dynexus import StochasticVariable, VariableRole\n>>>\n>>> class TemperatureSensor(StochasticVariable):\n...     def __init__(self, name: str, true_temp: float, noise_std: float):\n...         super().__init__(name, role=VariableRole.OBSERVATION)\n...         self.true_temp = true_temp\n...         self.noise_std = noise_std\n...\n...     def sample(self, n: int = 1) -> np.ndarray:\n...         noise = self._random_state.normal(0, self.noise_std, n)\n...         
return self.true_temp + noise\n...\n>>> sensor = TemperatureSensor(\"temp_sensor\", 22.0, 0.5)\n>>> sensor.set_random_seed(42)  # Ensure reproducibility\n>>> samples = sensor.sample(1000)\n>>> print(f\"Mean: {samples.mean():.2f}, Std: {samples.std():.2f}\")\nMean: 22.01, Std: 0.49",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "name",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "description",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "units",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                },
                {
                  "name": "role",
                  "type": "normal",
                  "annotation": "VariableRole",
                  "default": null
                },
                {
                  "name": "distribution_params",
                  "type": "normal",
                  "annotation": "DistributionParameters | None",
                  "default": null
                },
                {
                  "name": "config",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                },
                {
                  "name": "random_seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize a stochastic variable with reproducibility support.\n\nParameters\n----------\nname : str\n    Variable name.\ndescription : str, optional\n    Human-readable description.\nunits : str, optional\n    Physical units.\nrole : VariableRole, optional\n    Role in causal relationships.\ndistribution_params : DistributionParameters | None, optional\n    Parameters defining the probability distribution.\nconfig : dict[str, Any] | None, optional\n    Configuration dictionary.\nrandom_seed : int | None, optional\n    Random seed for reproducible sampling. If provided, ensures\n    deterministic behavior. Takes precedence over seed in distribution_params.\n\nRaises\n------\nValueError\n    If random_seed is provided but not an integer."
            },
            {
              "name": "_initialize_random_state",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "random_seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize reproducible random state for this variable.\n\nParameters\n----------\nrandom_seed : int | None\n    Random seed for deterministic behavior. If None, uses seed from\n    distribution_params or creates non-deterministic state.\n\nRaises\n------\nValueError\n    If random_seed is provided but not an integer."
            },
            {
              "name": "_initialize_distribution",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize the scipy distribution from parameters."
            },
            {
              "name": "set_random_seed",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "seed",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Set random seed for reproducible sampling.\n\nThis is essential for:\n- Debugging causal relationships\n- Validating counterfactual scenarios\n- Reproducing optimization results\n- Scientific validation\n\nParameters\n----------\nseed : int\n    Random seed to use for all future sampling.\n\nRaises\n------\nValueError\n    If seed is not an integer.\n\nExamples\n--------\n>>> variable.set_random_seed(42)\n>>> samples1 = variable.sample(100)\n>>> variable.set_random_seed(42)  # Reset to same seed\n>>> samples2 = variable.sample(100)  # Identical to samples1"
            },
            {
              "name": "checkpoint_random_state",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "name",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Create a checkpoint of the current random state.\n\nThis allows for:\n- Replaying specific scenarios\n- Debugging stochastic behavior\n- Comparing different intervention strategies\n- Serializing simulation state\n\nParameters\n----------\nname : str\n    Name for this checkpoint.\n\nReturns\n-------\nstr\n    Checkpoint ID that can be used to restore state.\n\nExamples\n--------\n>>> checkpoint = variable.checkpoint_random_state(\"before_intervention\")\n>>> # ... perform causal operations ...\n>>> variable.restore_random_state(checkpoint)  # Back to pre-intervention state"
            },
            {
              "name": "restore_random_state",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "checkpoint_id",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Restore random state from checkpoint.\n\nParameters\n----------\ncheckpoint_id : str\n    Checkpoint ID to restore.\n\nReturns\n-------\nbool\n    True if restoration was successful.\n\nExamples\n--------\n>>> checkpoint = variable.checkpoint_random_state(\"state1\")\n>>> variable.restore_random_state(checkpoint)\nTrue"
            },
            {
              "name": "get_random_state_info",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Get information about current random state.\n\nReturns\n-------\ndict[str, Any]\n    Dictionary with random state information for debugging.\n\nExamples\n--------\n>>> info = variable.get_random_state_info()\n>>> print(info['random_seed'])\n42"
            },
            {
              "name": "sample",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "n",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "FloatArray | IntArray",
              "decorators": [
                "abstractmethod"
              ],
              "is_async": false,
              "docstring": "Sample from the variable's distribution with reproducibility.\n\nParameters\n----------\nn : int, optional\n    Number of samples to draw.\n\nReturns\n-------\nFloatArray | IntArray\n    Array of samples with appropriate dtype (float64 for continuous,\n    int64 for discrete distributions).\n\nNotes\n-----\nThis method guarantees reproducible results when:\n1. Random seed is set via set_random_seed()\n2. Random state checkpoints are used\n3. Sampling order is preserved\n4. No external random operations interfere\n\nExamples\n--------\n>>> variable.set_random_seed(42)\n>>> samples1 = variable.sample(100)\n>>> variable.set_random_seed(42)  # Reset to same seed\n>>> samples2 = variable.sample(100)  # Will be identical to samples1"
            },
            {
              "name": "pdf",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "x",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [
                "overload"
              ],
              "is_async": false,
              "docstring": null
            },
            {
              "name": "pdf",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "x",
                  "type": "normal",
                  "annotation": "FloatArray",
                  "default": null
                }
              ],
              "returns": "FloatArray",
              "decorators": [
                "overload"
              ],
              "is_async": false,
              "docstring": null
            },
            {
              "name": "pdf",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "x",
                  "type": "normal",
                  "annotation": "FloatOrArray",
                  "default": null
                }
              ],
              "returns": "FloatOrArray",
              "decorators": [],
              "is_async": false,
              "docstring": "Calculate probability density at given point(s).\n\nParameters\n----------\nx : float | FloatArray\n    Point(s) at which to evaluate density.\n\nReturns\n-------\nfloat | FloatArray\n    Probability density.\n\nRaises\n------\nValueError\n    If no distribution is defined.\n\nExamples\n--------\n>>> density = variable.pdf(0.5)\n>>> densities = variable.pdf(np.array([0.1, 0.5, 0.9]))"
            },
            {
              "name": "cdf",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "x",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [
                "overload"
              ],
              "is_async": false,
              "docstring": null
            },
            {
              "name": "cdf",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "x",
                  "type": "normal",
                  "annotation": "FloatArray",
                  "default": null
                }
              ],
              "returns": "FloatArray",
              "decorators": [
                "overload"
              ],
              "is_async": false,
              "docstring": null
            },
            {
              "name": "cdf",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "x",
                  "type": "normal",
                  "annotation": "FloatOrArray",
                  "default": null
                }
              ],
              "returns": "FloatOrArray",
              "decorators": [],
              "is_async": false,
              "docstring": "Calculate cumulative probability up to given point(s).\n\nParameters\n----------\nx : float | FloatArray\n    Point(s) at which to evaluate CDF.\n\nReturns\n-------\nfloat | FloatArray\n    Cumulative probability.\n\nRaises\n------\nValueError\n    If no distribution is defined.\n\nExamples\n--------\n>>> prob = variable.cdf(0.5)\n>>> probs = variable.cdf(np.array([0.1, 0.5, 0.9]))"
            },
            {
              "name": "quantile",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "q",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [
                "overload"
              ],
              "is_async": false,
              "docstring": null
            },
            {
              "name": "quantile",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "q",
                  "type": "normal",
                  "annotation": "FloatArray",
                  "default": null
                }
              ],
              "returns": "FloatArray",
              "decorators": [
                "overload"
              ],
              "is_async": false,
              "docstring": null
            },
            {
              "name": "quantile",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "q",
                  "type": "normal",
                  "annotation": "FloatOrArray",
                  "default": null
                }
              ],
              "returns": "FloatOrArray",
              "decorators": [],
              "is_async": false,
              "docstring": "Calculate quantiles for given probability/probabilities.\n\nParameters\n----------\nq : float | FloatArray\n    Probability/probabilities (between 0 and 1).\n\nReturns\n-------\nfloat | FloatArray\n    Quantile values.\n\nRaises\n------\nValueError\n    If no distribution is defined.\nValueError\n    If q is outside [0, 1].\n\nExamples\n--------\n>>> median = variable.quantile(0.5)\n>>> quartiles = variable.quantile(np.array([0.25, 0.5, 0.75]))"
            },
            {
              "name": "entropy",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "float | None",
              "decorators": [],
              "is_async": false,
              "docstring": "Calculate entropy of the distribution.\n\nReturns\n-------\nfloat | None\n    Entropy in nats, or None if not calculable.\n\nExamples\n--------\n>>> entropy = variable.entropy()\n>>> if entropy is not None:\n...     print(f\"Entropy: {entropy:.3f} nats\")"
            },
            {
              "name": "kl_divergence",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "other",
                  "type": "normal",
                  "annotation": "StochasticVariable",
                  "default": null
                }
              ],
              "returns": "float | None",
              "decorators": [],
              "is_async": false,
              "docstring": "Calculate KL divergence between this and another distribution.\n\nParameters\n----------\nother : StochasticVariable\n    Other distribution to compare with.\n\nReturns\n-------\nfloat | None\n    KL divergence in nats, or None if not calculable.\n\nNotes\n-----\nThis is a Monte Carlo approximation if analytical solution not available.\n\nExamples\n--------\n>>> divergence = variable1.kl_divergence(variable2)\n>>> if divergence is not None:\n...     print(f\"KL divergence: {divergence:.3f}\")"
            },
            {
              "name": "bayesian_update",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "observation",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "observation_variance",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Perform Bayesian update given an observation.\n\nParameters\n----------\nobservation : float\n    Observed value.\nobservation_variance : float, optional\n    Variance of the observation noise.\n\nReturns\n-------\nbool\n    True if update was successful.\n\nNotes\n-----\nCurrently only implements conjugate prior update for normal distributions.\n\nExamples\n--------\n>>> variable.bayesian_update(observation=22.5, observation_variance=0.1)\nTrue"
            },
            {
              "name": "validate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate a value for stochastic variable.\n\nParameters\n----------\nvalue : Any\n    Value to validate.\n\nReturns\n-------\nbool\n    True if value could plausibly come from the distribution.\n\nExamples\n--------\n>>> is_valid = variable.validate(1.5)\n>>> print(f\"Value is valid: {is_valid}\")"
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "NormalVariable",
          "bases": [
            "StochasticVariable"
          ],
          "docstring": "Normally distributed stochastic variable with reproducibility.\n\nExamples\n--------\n>>> import numpy as np\n>>> from dynexus import NormalVariable, VariableRole\n>>>\n>>> # Create a normally distributed variable\n>>> temperature = NormalVariable(\n...     name=\"temperature\",\n...     description=\"Temperature with measurement noise\",\n...     units=\"Celsius\",\n...     role=VariableRole.OBSERVATION,\n...     mean=22.0,\n...     std=0.5,\n...     random_seed=42\n... )\n>>>\n>>> # Sample from distribution\n>>> samples = temperature.sample(1000)\n>>> print(f\"Sample mean: {samples.mean():.2f}, std: {samples.std():.2f}\")\nSample mean: 22.01, std: 0.49\n>>>\n>>> # Bayesian update with observation\n>>> temperature.bayesian_update(22.5, observation_variance=0.1)\nTrue",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "name",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "mean",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "std",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "description",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "units",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                },
                {
                  "name": "role",
                  "type": "normal",
                  "annotation": "VariableRole",
                  "default": null
                },
                {
                  "name": "config",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                },
                {
                  "name": "random_seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize a normal variable.\n\nParameters\n----------\nname : str\n    Variable name.\nmean : float\n    Mean of the normal distribution.\nstd : float\n    Standard deviation of the normal distribution.\ndescription : str, optional\n    Human-readable description.\nunits : str, optional\n    Physical units.\nrole : VariableRole, optional\n    Role in causal relationships.\nconfig : dict[str, Any] | None, optional\n    Configuration dictionary.\nrandom_seed : int | None, optional\n    Random seed for reproducible sampling.\n\nRaises\n------\nValueError\n    If std is not positive."
            },
            {
              "name": "sample",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "n",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "FloatArray",
              "decorators": [],
              "is_async": false,
              "docstring": "Sample from normal distribution.\n\nParameters\n----------\nn : int\n    Number of samples to draw.\n\nReturns\n-------\nFloatArray\n    Array of normally distributed samples."
            },
            {
              "name": "validate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate value for normal distribution.\n\nParameters\n----------\nvalue : Any\n    Value to validate.\n\nReturns\n-------\nbool\n    True if value is within 5 standard deviations of the mean."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "UniformVariable",
          "bases": [
            "StochasticVariable"
          ],
          "docstring": "Uniformly distributed stochastic variable with reproducibility.\n\nExamples\n--------\n>>> import numpy as np\n>>> from dynexus import UniformVariable, VariableRole\n>>>\n>>> # Create a uniformly distributed variable\n>>> position = UniformVariable(\n...     name=\"position\",\n...     low=0.0,\n...     high=10.0,\n...     description=\"Random position in 1D space\",\n...     units=\"meters\",\n...     role=VariableRole.ENDOGENOUS,\n...     random_seed=42\n... )\n>>>\n>>> # All samples should be between 0 and 10\n>>> samples = position.sample(5000)\n>>> print(f\"Min: {samples.min():.2f}, Max: {samples.max():.2f}\")\nMin: 0.00, Max: 10.00",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "name",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "low",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "high",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "description",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "units",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                },
                {
                  "name": "role",
                  "type": "normal",
                  "annotation": "VariableRole",
                  "default": null
                },
                {
                  "name": "config",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                },
                {
                  "name": "random_seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize a uniform variable.\n\nParameters\n----------\nname : str\n    Variable name.\nlow : float\n    Lower bound of uniform distribution.\nhigh : float\n    Upper bound of uniform distribution.\ndescription : str, optional\n    Human-readable description.\nunits : str, optional\n    Physical units.\nrole : VariableRole, optional\n    Role in causal relationships.\nconfig : dict[str, Any] | None, optional\n    Configuration dictionary.\nrandom_seed : int | None, optional\n    Random seed for reproducible sampling.\n\nRaises\n------\nValueError\n    If low >= high."
            },
            {
              "name": "sample",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "n",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "FloatArray",
              "decorators": [],
              "is_async": false,
              "docstring": "Sample from uniform distribution.\n\nParameters\n----------\nn : int\n    Number of samples to draw.\n\nReturns\n-------\nFloatArray\n    Array of uniformly distributed samples."
            },
            {
              "name": "validate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate value for uniform distribution.\n\nParameters\n----------\nvalue : Any\n    Value to validate.\n\nReturns\n-------\nbool\n    True if value is within bounds (with small tolerance)."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "MixtureVariable",
          "bases": [
            "StochasticVariable"
          ],
          "docstring": "Mixture distribution variable (weighted combination of distributions).\n\nExamples\n--------\n>>> import numpy as np\n>>> from dynexus import MixtureVariable, NormalVariable, VariableRole\n>>>\n>>> # Create a bimodal mixture\n>>> comp1 = NormalVariable(\"comp1\", mean=0, std=1, random_seed=42)\n>>> comp2 = NormalVariable(\"comp2\", mean=5, std=1, random_seed=43)\n>>>\n>>> mixture = MixtureVariable(\n...     name=\"bimodal\",\n...     components=[comp1, comp2],\n...     weights=[0.7, 0.3],\n...     description=\"Bimodal mixture distribution\",\n...     role=VariableRole.ENDOGENOUS,\n...     random_seed=44\n... )\n>>>\n>>> # Samples will come from both modes\n>>> samples = mixture.sample(10000)\n>>> print(f\"Sample mean: {samples.mean():.2f}\")\nSample mean: 1.53",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "name",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "components",
                  "type": "normal",
                  "annotation": "list[StochasticVariable]",
                  "default": null
                },
                {
                  "name": "weights",
                  "type": "normal",
                  "annotation": "list[float] | None",
                  "default": null
                },
                {
                  "name": "description",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "units",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                },
                {
                  "name": "role",
                  "type": "normal",
                  "annotation": "VariableRole",
                  "default": null
                },
                {
                  "name": "config",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                },
                {
                  "name": "random_seed",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize a mixture variable.\n\nParameters\n----------\nname : str\n    Variable name.\ncomponents : list[StochasticVariable]\n    List of component distributions.\nweights : list[float] | None, optional\n    Weights for each component. If None, uniform weights.\ndescription : str, optional\n    Human-readable description.\nunits : str, optional\n    Physical units.\nrole : VariableRole, optional\n    Role in causal relationships.\nconfig : dict[str, Any] | None, optional\n    Configuration dictionary.\nrandom_seed : int | None, optional\n    Random seed for reproducible sampling.\n\nRaises\n------\nValueError\n    If weights don't match components length."
            },
            {
              "name": "sample",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "n",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "FloatArray",
              "decorators": [],
              "is_async": false,
              "docstring": "Sample from mixture distribution.\n\nParameters\n----------\nn : int\n    Number of samples to draw.\n\nReturns\n-------\nFloatArray\n    Array of samples from the mixture distribution."
            },
            {
              "name": "pdf",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "x",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [
                "overload"
              ],
              "is_async": false,
              "docstring": null
            },
            {
              "name": "pdf",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "x",
                  "type": "normal",
                  "annotation": "FloatArray",
                  "default": null
                }
              ],
              "returns": "FloatArray",
              "decorators": [
                "overload"
              ],
              "is_async": false,
              "docstring": null
            },
            {
              "name": "pdf",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "x",
                  "type": "normal",
                  "annotation": "FloatOrArray",
                  "default": null
                }
              ],
              "returns": "FloatOrArray",
              "decorators": [],
              "is_async": false,
              "docstring": "Calculate mixture density.\n\nParameters\n----------\nx : float | FloatArray\n    Point(s) at which to evaluate density.\n\nReturns\n-------\nfloat | FloatArray\n    Probability density at x."
            },
            {
              "name": "validate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate value for mixture distribution.\n\nParameters\n----------\nvalue : Any\n    Value to validate.\n\nReturns\n-------\nbool\n    True if value could plausibly come from any component."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        }
      ],
      "functions": [],
      "global_vars": [
        {
          "name": "logger",
          "annotation": null
        }
      ]
    },
    "dynexus/core/variable/meta.py": {
      "path": "dynexus/core/variable/meta.py",
      "module_docstring": "MetaVariable for managing metadata and cross-variable relationships.",
      "imports": [
        {
          "module": "__future__",
          "name": "annotations",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "dataclass",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "field",
          "alias": null,
          "level": 0
        },
        {
          "module": "datetime",
          "name": "datetime",
          "alias": null,
          "level": 0
        },
        {
          "module": "datetime",
          "name": "timedelta",
          "alias": null,
          "level": 0
        },
        {
          "module": "datetime",
          "name": "timezone",
          "alias": null,
          "level": 0
        },
        {
          "module": "enum",
          "name": "Enum",
          "alias": null,
          "level": 0
        },
        {
          "module": "enum",
          "name": "auto",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "numpy",
          "alias": "np"
        },
        {
          "module": "utils.logging",
          "name": "get_logger",
          "alias": null,
          "level": 3
        },
        {
          "module": "base",
          "name": "BaseVariable",
          "alias": null,
          "level": 1
        },
        {
          "module": "base",
          "name": "VariableRole",
          "alias": null,
          "level": 1
        },
        {
          "module": "dateutil.parser",
          "alias": null
        }
      ],
      "classes": [
        {
          "name": "AggregationMethod",
          "bases": [
            "Enum"
          ],
          "docstring": "Methods for aggregating multiple variable values into a single consensus value.\n\nDefines the mathematical and statistical approaches for combining multiple\nvalue proposals into a single representative value. Each method has different\nproperties regarding robustness to outliers, sensitivity to confidence,\nand computational complexity.\n\nAttributes\n----------\nMEAN : AggregationMethod\n    Arithmetic mean (average) of all proposed values. Simple and fast but\n    sensitive to outliers. Formula: (Σ values) / n\n\nMEDIAN : AggregationMethod\n    Median (middle value) of all proposed values. More robust to outliers\n    than mean. For n values, median is the middle value when sorted\n    (or average of two middle values for even n).\n\nWEIGHTED_MEAN : AggregationMethod\n    Confidence-weighted average. Each value is weighted by its confidence\n    score. Formula: Σ(value_i * confidence_i) / Σ(confidence_i)\n    Most appropriate when confidence estimates are reliable.\n\nMAX_CONFIDENCE : AggregationMethod\n    Selects the value with the highest confidence score. Simple and\n    confidence-aware, but ignores information from other proposals.\n    May be unstable if confidence estimates are noisy.\n\nCONSENSUS : AggregationMethod\n    Finds clusters of similar values and selects from the largest cluster.\n    Robust to multiple conflicting proposals. Uses tolerance-based grouping\n    to identify value clusters before aggregation.\n\nBAYESIAN_FUSION : AggregationMethod\n    Bayesian inference combining prior distribution with proposal likelihoods.\n    Most statistically rigorous but computationally intensive. Assumes\n    proposal distributions are known or can be estimated.\n\nNotes\n-----\nThe choice of aggregation method depends on:\n1. Reliability of confidence estimates\n2. Presence of outliers or adversarial proposals\n3. Computational constraints\n4. 
Statistical properties required\n\nFor sensor fusion applications, WEIGHTED_MEAN or BAYESIAN_FUSION are often\nappropriate. For conflict resolution with untrusted sources, CONSENSUS\nor MEDIAN provide robustness.\n\nExamples\n--------\n>>> from dynexus import AggregationMethod, MetaVariable\n>>>\n>>> # Create a meta-variable with different aggregation methods\n>>> sensor_mean = MetaVariable(\n...     name=\"sensor_average\",\n...     aggregation_method=AggregationMethod.MEAN\n... )\n>>>\n>>> confidence_weighted = MetaVariable(\n...     name=\"confidence_weighted\",\n...     aggregation_method=AggregationMethod.WEIGHTED_MEAN\n... )\n>>>\n>>> robust_aggregator = MetaVariable(\n...     name=\"robust_value\",\n...     aggregation_method=AggregationMethod.CONSENSUS\n... )\n>>>\n>>> # Compare aggregation behaviors\n>>> print(f\"Mean aggregation: {AggregationMethod.MEAN.name}\")\n>>> Mean aggregation: MEAN\n>>> print(f\"Median (robust to outliers): {AggregationMethod.MEDIAN.name}\")\n>>> Median (robust to outliers): MEDIAN\n>>> print(f\"Bayesian (statistical): {AggregationMethod.BAYESIAN_FUSION.name}\")\n>>> Bayesian (statistical): BAYESIAN_FUSION",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "ConflictResolutionStrategy",
          "bases": [
            "Enum"
          ],
          "docstring": "Strategies for resolving conflicts between multiple value proposals.\n\nDefines approaches for handling situations where different sources propose\nconflicting values for the same variable. Each strategy represents a\ndifferent philosophy for trust allocation, recency weighting, and\nconflict management.\n\nAttributes\n----------\nHIGHEST_CONFIDENCE : ConflictResolutionStrategy\n    Selects the proposal with the highest confidence score. Assumes\n    confidence estimates are accurate and comparable across sources.\n    Simple but may ignore valuable information from other proposals.\n\nMOST_RECENT : ConflictResolutionStrategy\n    Selects the most recent proposal. Appropriate for time-sensitive\n    systems where newer information supersedes older information.\n    Requires accurate and synchronized timestamps.\n\nWEIGHTED_AVERAGE : ConflictResolutionStrategy\n    Computes a weighted average based on confidence scores and optionally\n    recency. Balances information from all sources while weighting by\n    estimated reliability.\n\nDOMAIN_SPECIFIC : ConflictResolutionStrategy\n    Uses domain-specific rules or heuristics to resolve conflicts.\n    May incorporate additional metadata, source reputation, or\n    contextual information not captured in confidence scores.\n\nCONSENSUS : ConflictResolutionStrategy\n    Looks for agreement among multiple proposals. May require a\n    minimum number of agreeing sources or a supermajority threshold.\n    Robust to outliers and adversarial sources.\n\nEXTERNAL_ARBITRATION : ConflictResolutionStrategy\n    Delegates conflict resolution to an external system, human operator,\n    or higher-level reasoning process. 
Used when automated resolution\n    is insufficient or when conflicts indicate systemic issues.\n\nNotes\n-----\nConflict resolution is distinct from aggregation:\n- **Aggregation** combines all proposals into a single value\n- **Conflict resolution** chooses between incompatible proposals\n\nIn practice, these are often used together: first attempt aggregation,\nand if conflicts persist (e.g., high variance), apply conflict resolution.\n\nThe choice of strategy depends on:\n1. Source reliability characteristics\n2. Consequences of incorrect resolution\n3. Availability of domain knowledge\n4. Time constraints for resolution\n\nExamples\n--------\n>>> from dynexus import ConflictResolutionStrategy, MetaVariable\n>>>\n>>> # Different conflict scenarios require different strategies\n>>> sensor_conflict = MetaVariable(\n...     name=\"sensor_data\",\n...     conflict_strategy=ConflictResolutionStrategy.HIGHEST_CONFIDENCE\n... )\n>>>\n>>> time_critical = MetaVariable(\n...     name=\"market_price\",\n...     conflict_strategy=ConflictResolutionStrategy.MOST_RECENT\n... )\n>>>\n>>> safety_critical = MetaVariable(\n...     name=\"reactor_temperature\",\n...     conflict_strategy=ConflictResolutionStrategy.CONSENSUS\n... )\n>>>\n>>> # Domain-specific resolution for specialized knowledge\n>>> medical_diagnosis = MetaVariable(\n...     name=\"patient_diagnosis\",\n...     conflict_strategy=ConflictResolutionStrategy.DOMAIN_SPECIFIC\n... )\n>>>\n>>> # External arbitration when automated resolution fails\n>>> legal_judgment = MetaVariable(\n...     name=\"legal_interpretation\",\n...     conflict_strategy=ConflictResolutionStrategy.EXTERNAL_ARBITRATION\n... )",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "ValueProposal",
          "bases": [],
          "docstring": "Represents a proposed value for a variable from a specific source.\n\nContains the proposed value along with metadata about its provenance,\nconfidence, timing, and additional context. ValueProposals are the\nfundamental unit of information in multi-source reasoning systems.\n\nParameters\n----------\nvalue : Any\n    The proposed value. Can be any type supported by the variable's\n    validation and serialization system. Typically numeric, categorical,\n    or structured data matching the variable's domain.\n\nconfidence : float\n    Confidence score for the proposed value, ranging from 0.0 to 1.0.\n    Represents the source's estimate of reliability or accuracy.\n    Should be calibrated appropriately for the application domain.\n\nsource : str\n    Identifier of the proposal source. Typically a mechanism ID,\n    sensor name, algorithm identifier, or human operator ID.\n    Used for provenance tracking and source-specific weighting.\n\ntimestamp : datetime\n    Time when the proposal was generated or observed. Should use\n    UTC timezone for consistency. Critical for recency-based\n    reasoning and temporal correlation.\n\nmetadata : dict[str, Any], optional\n    Additional context-specific metadata about the proposal.\n    May include:\n    - Measurement uncertainty or error bounds\n    - Processing pipeline information\n    - Quality metrics or flags\n    - Environmental conditions\n    - Source-specific parameters\n    Default: empty dictionary.\n\nAttributes\n----------\nvalue : Any\n    See Parameters section.\n\nconfidence : float\n    See Parameters section.\n\nsource : str\n    See Parameters section.\n\ntimestamp : datetime\n    See Parameters section.\n\nmetadata : dict[str, Any]\n    See Parameters section.\n\nRaises\n------\nValueError\n    If confidence is not in range [0.0, 1.0].\n\nNotes\n-----\nValueProposals are designed to be immutable once created. 
For updates\nor corrections, create a new ValueProposal rather than modifying\nan existing one.\n\nThe metadata field enables extensibility without modifying the core\nstructure. Metadata keys should follow naming conventions appropriate\nto the application domain.\n\nFor Bayesian reasoning systems, confidence may represent the inverse\nof variance or precision. For human-provided estimates, it may represent\nsubjective certainty.\n\nExamples\n--------\n>>> from datetime import datetime, timezone\n>>> from dynexus import ValueProposal\n>>>\n>>> # Basic temperature reading from a sensor\n>>> temp_proposal = ValueProposal(\n...     value=22.5,\n...     confidence=0.92,\n...     source=\"temperature_sensor_001\",\n...     timestamp=datetime.utcnow(),\n...     metadata={\n...         \"sensor_type\": \"thermocouple\",\n...         \"calibration_date\": \"2025-12-10\",\n...         \"sampling_rate\": \"1 Hz\",\n...         \"environment\": \"indoor_lab\"\n...     }\n... )\n>>>\n>>> # Financial forecast from an algorithm\n>>> forecast_proposal = ValueProposal(\n...     value=150.25,\n...     confidence=0.75,\n...     source=\"price_predictor_v2\",\n...     timestamp=datetime.now(timezone.utc),\n...     metadata={\n...         \"model_version\": \"2.1.3\",\n...         \"prediction_horizon\": \"24h\",\n...         \"input_features\": [\"volume\", \"sentiment\", \"trend\"],\n...         \"uncertainty_interval\": [140.5, 160.0]\n...     }\n... )\n>>>\n>>> # Medical diagnosis from a clinician\n>>> diagnosis_proposal = ValueProposal(\n...     value=\"Type_II_Diabetes\",\n...     confidence=0.88,\n...     source=\"dr_smith\",\n...     timestamp=datetime.now(timezone.utc),\n...     metadata={\n...         \"certainty_level\": \"high\",\n...         \"supporting_evidence\": [\"glucose_level\", \"family_history\"],\n...         \"differential_diagnoses\": [\"Prediabetes\", \"Type_I_Diabetes\"],\n...         \"notes\": \"Patient presents with classic symptoms\"\n...     }\n... 
)\n>>>\n>>> # Invalid confidence (should raise ValueError)\n>>> try:\n...     invalid_proposal = ValueProposal(\n...         value=10.0,\n...         confidence=1.5,  # Invalid: > 1.0\n...         source=\"test\",\n...         timestamp=datetime.now(timezone.utc)\n...     )\n... except ValueError as e:\n...     print(f\"Validation error: {e}\")\nValidation error: Confidence must be between 0.0 and 1.0, got 1.5",
          "methods": [
            {
              "name": "__post_init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate the ValueProposal after initialization.\n\nPerforms validation checks to ensure the proposal is well-formed.\nCurrently validates confidence range.\n\nRaises\n------\nValueError\n    If confidence is outside valid range [0.0, 1.0]."
            },
            {
              "name": "to_dict",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Convert ValueProposal to dictionary representation.\n\nReturns\n-------\ndict[str, Any]\n    Dictionary containing all proposal data. Suitable for\n    serialization or transmission.\n\nExamples\n--------\n>>> from datetime import datetime\n>>> from dynexus import ValueProposal\n>>> proposal = ValueProposal(\n...     value=42.0,\n...     confidence=0.9,\n...     source=\"test\",\n...     timestamp=datetime(2025, 12, 10, 12, 0, 0)\n... )\n>>> data = proposal.to_dict()\n>>> print(data[\"value\"])\n42.0\n>>> print(data[\"source\"])\ntest"
            },
            {
              "name": "is_stale",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "max_age_seconds",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Check if the proposal is stale (older than specified age).\n\nParameters\n----------\nmax_age_seconds : float, optional\n    Maximum age in seconds before proposal is considered stale.\n    Default: 3600.0 (1 hour).\n\nReturns\n-------\nbool\n    True if proposal is stale, False otherwise.\n\nExamples\n--------\n>>> from datetime import datetime, timezone, timedelta\n>>> from dynexus import ValueProposal\n>>>\n>>> # Fresh proposal (just created)\n>>> fresh = ValueProposal(\n...     value=10.0,\n...     confidence=0.9,\n...     source=\"test\",\n...     timestamp=datetime.now(timezone.utc)\n... )\n>>> print(fresh.is_stale(max_age_seconds=60))\nFalse\n>>>\n>>> # Old proposal (2 hours ago)\n>>> old = ValueProposal(\n...     value=10.0,\n...     confidence=0.9,\n...     source=\"test\",\n...     timestamp=datetime.now(timezone.utc) - timedelta(hours=2)\n... )\n>>> print(old.is_stale(max_age_seconds=3600))\nTrue"
            },
            {
              "name": "__str__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Representation of ValueProposal."
            },
            {
              "name": "__repr__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Detailed representation of ValueProposal."
            }
          ],
          "class_methods": [
            {
              "name": "from_dict",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "data",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "ValueProposal",
              "decorators": [
                "classmethod"
              ],
              "is_async": false,
              "docstring": "Create ValueProposal from dictionary representation.\n\nParameters\n----------\ndata : dict[str, Any]\n    Dictionary containing proposal data. Must include:\n    - value: The proposed value\n    - confidence: Confidence score (0.0 to 1.0)\n    - source: Source identifier\n    - timestamp: ISO format timestamp string\n    - metadata: Optional metadata dictionary\n\nReturns\n-------\nValueProposal\n    Reconstructed ValueProposal instance.\n\nRaises\n------\nKeyError\n    If required fields are missing.\nValueError\n    If timestamp cannot be parsed or confidence is invalid.\n\nExamples\n--------\n>>> from dynexus import ValueProposal\n>>> data = {\n...     \"value\": 99.5,\n...     \"confidence\": 0.85,\n...     \"source\": \"sensor_01\",\n...     \"timestamp\": \"2025-12-10T10:30:45.123456\",\n...     \"metadata\": {\"unit\": \"Celsius\"}\n... }\n>>> proposal = ValueProposal.from_dict(data)\n>>> print(proposal.value)\n99.5\n>>> print(proposal.source)\nsensor_01"
            }
          ],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "MetaVariable",
          "bases": [
            "BaseVariable"
          ],
          "docstring": "Variable that manages relationships between other variables.\n\nMetaVariables can aggregate, reconcile, and reason about multiple\nrelated variables. They're useful for creating derived variables,\nmanaging conflicts, and implementing system-level reasoning.\n\nExamples\n--------\n>>> from datetime import datetime, timezone\n>>> from dynexus import MetaVariable, VariableRole, AggregationMethod, ValueProposal\n>>>\n>>> # Create a meta-variable that averages temperature sensors\n>>> avg_temp = MetaVariable(\n...     name=\"average_temperature\",\n...     description=\"Average of all temperature sensors\",\n...     units=\"Celsius\",\n...     role=VariableRole.OBSERVATION,\n...     aggregation_method=AggregationMethod.WEIGHTED_MEAN\n... )\n>>>\n>>> # Add value proposals from different sensors\n>>> proposals = [\n...     ValueProposal(22.5, 0.9, \"sensor_1\", datetime.now(timezone.utc)),\n...     ValueProposal(23.1, 0.8, \"sensor_2\", datetime.now(timezone.utc)),\n...     ValueProposal(21.9, 0.95, \"sensor_3\", datetime.now(timezone.utc)),\n... ]\n>>> for proposal in proposals:\n...     avg_temp.add_proposal(proposal)\n...\n>>> # Resolve to get consensus value\n>>> avg_temp.resolve()\n>>> True\n>>> print(f\"Average temperature: {avg_temp.value:.2f}°C\")\nAverage temperature: 22.47°C",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "name",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "description",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "units",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                },
                {
                  "name": "role",
                  "type": "normal",
                  "annotation": "VariableRole",
                  "default": null
                },
                {
                  "name": "aggregation_method",
                  "type": "normal",
                  "annotation": "AggregationMethod",
                  "default": null
                },
                {
                  "name": "conflict_strategy",
                  "type": "normal",
                  "annotation": "ConflictResolutionStrategy",
                  "default": null
                },
                {
                  "name": "config",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize a MetaVariable.\n\nParameters\n----------\nname : str\n    Unique name of the meta-variable.\ndescription : str, optional\n    Human-readable description.\nunits : str, optional\n    Physical units.\nrole : VariableRole, optional\n    Role in causal relationships.\naggregation_method : AggregationMethod, optional\n    Method for aggregating multiple proposals.\nconflict_strategy : ConflictResolutionStrategy, optional\n    Strategy for resolving conflicts.\nconfig : dict, optional\n    Configuration dictionary."
            },
            {
              "name": "add_proposal",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposal",
                  "type": "normal",
                  "annotation": "ValueProposal",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Add a value proposal from a source.\n\nParameters\n----------\nproposal : ValueProposal\n    Proposed value with confidence and metadata.\n\nExamples\n--------\n>>> from datetime import datetime, timezone\n>>> from dynexus import MetaVariable, ValueProposal\n>>> meta = MetaVariable(name=\"test\")\n>>> proposal = ValueProposal(42.0, 0.9, \"source_1\", datetime.now(timezone.utc))\n>>> meta.add_proposal(proposal)"
            },
            {
              "name": "clear_proposals",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Clear all pending proposals."
            },
            {
              "name": "resolve",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "force",
                  "type": "normal",
                  "annotation": "bool",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Resolve proposals into a single value.\n\nParameters\n----------\nforce : bool, optional\n    If True, force resolution even with conflicting proposals.\n\nReturns\n-------\nbool\n    True if resolution was successful.\n\nRaises\n------\nValueError\n    If no proposals available or aggregation fails."
            },
            {
              "name": "_aggregate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposals",
                  "type": "normal",
                  "annotation": "list[ValueProposal]",
                  "default": null
                }
              ],
              "returns": "tuple[Any, float]",
              "decorators": [],
              "is_async": false,
              "docstring": "Aggregate multiple proposals into a single value.\n\nParameters\n----------\nproposals : list[ValueProposal]\n    List of value proposals to aggregate.\n\nReturns\n-------\ntuple[Any, float]\n    Aggregated value and confidence.\n\nRaises\n------\nValueError\n    If aggregation method is not implemented or fails."
            },
            {
              "name": "_aggregate_mean",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposals",
                  "type": "normal",
                  "annotation": "list[ValueProposal]",
                  "default": null
                }
              ],
              "returns": "tuple[float, float]",
              "decorators": [],
              "is_async": false,
              "docstring": "Aggregate using arithmetic mean."
            },
            {
              "name": "_aggregate_median",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposals",
                  "type": "normal",
                  "annotation": "list[ValueProposal]",
                  "default": null
                }
              ],
              "returns": "tuple[float, float]",
              "decorators": [],
              "is_async": false,
              "docstring": "Aggregate using median."
            },
            {
              "name": "_aggregate_weighted_mean",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposals",
                  "type": "normal",
                  "annotation": "list[ValueProposal]",
                  "default": null
                }
              ],
              "returns": "tuple[float, float]",
              "decorators": [],
              "is_async": false,
              "docstring": "Aggregate using confidence-weighted mean."
            },
            {
              "name": "_aggregate_max_confidence",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposals",
                  "type": "normal",
                  "annotation": "list[ValueProposal]",
                  "default": null
                }
              ],
              "returns": "tuple[Any, float]",
              "decorators": [],
              "is_async": false,
              "docstring": "Select proposal with maximum confidence."
            },
            {
              "name": "_aggregate_consensus",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposals",
                  "type": "normal",
                  "annotation": "list[ValueProposal]",
                  "default": null
                }
              ],
              "returns": "tuple[Any, float]",
              "decorators": [],
              "is_async": false,
              "docstring": "Find consensus among proposals.\n\nLooks for clusters of similar values and selects the largest cluster."
            },
            {
              "name": "detect_conflict",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "threshold",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Detect if proposals contain significant conflicts.\n\nParameters\n----------\nthreshold : float, optional\n    Confidence threshold for considering a conflict (default: 0.5).\n\nReturns\n-------\nbool\n    True if significant conflict detected."
            },
            {
              "name": "_check_numeric_conflict",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposals",
                  "type": "normal",
                  "annotation": "list[ValueProposal]",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Check for numeric conflicts (can raise exceptions)."
            },
            {
              "name": "_check_string_conflict",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposals",
                  "type": "normal",
                  "annotation": "list[ValueProposal]",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Fallback string comparison."
            },
            {
              "name": "validate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate a value for this meta-variable.\n\nParameters\n----------\nvalue : Any\n    Value to validate.\n\nReturns\n-------\nbool\n    True if value is valid."
            },
            {
              "name": "to_dict",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Convert meta-variable to dictionary."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        }
      ],
      "functions": [],
      "global_vars": [
        {
          "name": "logger",
          "annotation": null
        }
      ]
    },
    "dynexus/core/variable/variable.py": {
      "path": "dynexus/core/variable/variable.py",
      "module_docstring": "Unified Variable abstraction with action proposal capabilities.\n\nA Variable in CCES is an active, reasoning agent that:\n- Holds a current value (state)\n- Reasons about its relationships\n- Proposes actions based on reasoning\n- Explains its own state and proposals\n\nKey Design Principles:\n---------------------\n1. **Variables propose, don't execute**: Separation of concerns between\n   reasoning (Variable) and execution (Executive)\n2. **Autonomous reasoning**: Each Variable has its own reasoning capabilities\n3. **Self-explanation**: Variables can explain their state and decisions\n4. **Proposal history**: Track all proposals for learning and debugging\n\nThis is the core abstraction that implements the \"Variable\" role in CCES:\n    Variable → Runtime → Optimizer → Executive",
      "imports": [
        {
          "module": "time",
          "alias": null
        },
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "numpy",
          "alias": "np"
        },
        {
          "module": "math.statistics.utils",
          "name": "StatisticalUtils",
          "alias": null,
          "level": 3
        },
        {
          "module": "utils.logging",
          "name": "get_logger",
          "alias": null,
          "level": 3
        },
        {
          "module": "action.proposal",
          "name": "ActionProposal",
          "alias": null,
          "level": 2
        },
        {
          "module": "reason",
          "name": "ReasoningResult",
          "alias": null,
          "level": 1
        },
        {
          "module": "reason",
          "name": "ReasoningTask",
          "alias": null,
          "level": 1
        },
        {
          "module": "reason",
          "name": "VariableReasoner",
          "alias": null,
          "level": 1
        }
      ],
      "classes": [
        {
          "name": "Variable",
          "bases": [
            "VariableReasoner"
          ],
          "docstring": "Active variable with reasoning and proposal capabilities.\n\nVariables are the primary \"agents\" in CCES that:\n- Monitor their own state and relationships\n- Detect anomalies and plan interventions\n- Generate action proposals for the Executive to execute\n- Maintain history of their reasoning and proposals\n\nVariables NEVER execute actions directly. They only propose actions,\nwhich are then validated by Runtime, selected by Optimizer, and\nexecuted by Executive. This separation ensures consistency\nand enables centralized coordination.\n\nThis class provides default implementations for all abstract methods:\n- detect_anomaly(): Z-score based anomaly detection\n- resolve_conflict(): Weighted average or maximum confidence resolution\n- validate(): Basic type and range validation\n\nAttributes\n----------\n_proposal_history : List[ActionProposal]\n    Historical record of all proposals generated by this Variable.\n    Used for learning, debugging, and explanation generation.\n\nInherits from VariableReasoner:\n------------------------------\n- name : str - Human-readable name\n- initial_value : Any - Initial value/state\n- units : Optional[str] - Measurement units\n- initial_confidence : float - Initial confidence in [0.0, 1.0]\n- reasoning_history : list[ReasoningResult] - History of reasoning outcomes\n- reasoning_config : dict[str, Any] - Configuration for reasoning methods\n\nExamples\n--------\n>>> from dynexus import Variable\n>>> # Create a temperature variable with default reasoning\n>>> temp_var = Variable(\n...     name=\"RoomTemperature\",\n...     initial_value=22.5,\n...     units=\"°C\",\n...     initial_confidence=0.9\n... 
)\n>>> # Variable can detect anomalies out of the box\n>>> anomaly_result = temp_var.detect_anomaly()\n>>> anomaly_result.success\nFalse\n>>> # Variable can resolve conflicts between proposals\n>>> proposals = [(21.0, 0.8, \"controller1\"), (23.0, 0.6, \"controller2\")]\n>>> conflict_result = temp_var.resolve_conflict(proposals)\n>>> conflict_result.result[\"resolved_value\"]\n21.8...  # Weighted average\n>>> # Variable validates new values\n>>> temp_var.validate(25.0)\nTrue",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "args",
                  "type": "*args",
                  "annotation": "Any"
                },
                {
                  "name": "kwargs",
                  "type": "**kwargs",
                  "annotation": "Any"
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize a Variable with default reasoning capabilities.\n\nSets up default configuration for anomaly detection, conflict resolution,\nand validation. Initializes empty proposal history.\n\nParameters\n----------\n*args, **kwargs\n    Passed to VariableReasoner.__init__()\n    Typically includes: name, initial_value, units, initial_confidence\n\nNotes\n-----\n- Initializes empty proposal\n- Sets default action proposal configuration\n- Calls parent VariableReasoner constructor"
            },
            {
              "name": "_get_config",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "kwargs",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "Any",
              "decorators": [],
              "is_async": false,
              "docstring": "Set up default configuration for action proposal tasks.\n\nEstablishes sensible defaults for all action proposal parameters. User-provided\nconfiguration (from __init__) takes precedence over these defaults.\n\nThe configuration is organized hierarchically by action proposal task category:\n- conflict_resolution: Parameters for conflict resolution algorithms\n- validation: Parameters for value validation methods\n\nNotes\n-----\nThese default values are added to super class' default config\n\nSubclasses should override this method to provide domain-specific defaults."
            },
            {
              "name": "detect_anomaly",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "ReasoningResult",
              "decorators": [],
              "is_async": false,
              "docstring": "Detect anomalies in the variable's historical values using Z-score method.\n\nThis default implementation uses statistical Z-score detection:\n1. Collects historical values from the variable's history\n2. Calculates mean and standard deviation\n3. Computes Z-score for current value: z = |(value - mean) / std|\n4. Flags anomaly if z > threshold\n\nThe method handles edge cases gracefully:\n- Insufficient history: Returns failed result with explanation\n- Zero standard deviation: Returns non-anomaly result\n- Non-numeric values: Returns failed result (requires custom implementation)\n\nReturns\n-------\nReasoningResult\n    Result containing anomaly detection outcome with fields:\n    - 'is_anomaly': bool indicating if anomaly detected\n    - 'severity': float in [0.0, 1.0] based on Z-score relative to threshold\n    - 'z_score': float computed Z-score\n    - 'expected_range': tuple of (mean - threshold*std, mean + threshold*std)\n    - 'anomaly_type': 'high' or 'low' based on deviation direction\n\nNotes\n-----\nConfiguration via self.reasoning_config['anomaly_detection']:\n- threshold: Z-score threshold (default 3.0, ~99.7% confidence for normal dist)\n- window_size: Number of historical points to consider (default 50)\n- min_history: Minimum history needed for detection (default 5)\n- method: Always 'z_score' for this implementation\n\nFor domain-specific anomaly detection, override this method in a subclass.\n\nExamples\n--------\n>>> from dynexus import Variable\n>>> temp_var = Variable(name=\"Temperature\", initial_value=30.0)\n>>> # Add some normal history\n>>> for val in [22.0, 22.5, 21.8, 23.0, 22.2]:\n...     temp_var.update(val, 0.9)\n>>> result = temp_var.detect_anomaly()\n>>> result.result['is_anomaly']\nTrue  # 30.0 is far from mean ~22.3\n>>> result.result['severity'] > 0.5\nTrue"
            },
            {
              "name": "resolve_conflict",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposals",
                  "type": "normal",
                  "annotation": "list[tuple[Any, float, str]]",
                  "default": null
                }
              ],
              "returns": "ReasoningResult",
              "decorators": [],
              "is_async": false,
              "docstring": "Resolve conflicts between multiple value proposals.\n\nThis default implementation provides two strategies:\n1. **For numeric values**: Weighted average based on confidence scores\n2. **For non-numeric values**: Maximum confidence selection\n\nParameters\n----------\nproposals : lsit[Tuple[Any, float, str]]\n    List of proposed values as tuples of:\n    - value: The proposed value (any type)\n    - confidence: Confidence in the proposal [0.0, 1.0]\n    - source_name: Identifier for the proposal source\n\nReturns\n-------\nReasoningResult\n    Result containing the resolved value with fields:\n    - 'resolved_value': The chosen value after conflict resolution\n    - 'resolution_method': str describing method used\n    - 'source_contributions': dict mapping source names to influence weights\n    - 'consensus_degree': float indicating level of agreement (0-1)\n    For numeric resolutions:\n    - 'weighted_mean': The computed weighted average\n    For non-numeric resolutions:\n    - 'selected_source': The source with maximum confidence\n    - 'selected_confidence': The confidence of selected source\n\nRaises\n------\nValueError\n    If `proposals` list is empty.\n\nNotes\n-----\nConfiguration via self.reasoning_config['conflict_resolution']:\n- numeric_method: 'weighted_average' (default) or 'simple_average'\n- non_numeric_method: 'maximum_confidence' (default) or 'first_source'\n- require_consensus: Whether to fail if no clear winner (default False)\n\nThe method automatically detects value types and chooses appropriate strategy.\nAll values in proposals must be of compatible types for meaningful resolution.\n\nExamples\n--------\n>>> # Numeric conflict resolution\n>>> proposals = [(21.0, 0.8, \"thermostat\"), (23.0, 0.6, \"sensor\")]\n>>> result = temp_var.resolve_conflict(proposals)\n>>> result.result['resolved_value']\n21.8  # Weighted average: (21*0.8 + 23*0.6) / (0.8+0.6)\n>>> 
result.result['resolution_method']\n'confidence_weighted_average'\n\n>>> # Non-numeric conflict resolution\n>>> proposals = [(\"active\", 0.9, \"system\"), (\"standby\", 0.7, \"backup\")]\n>>> result = status_var.resolve_conflict(proposals)\n>>> result.result['resolved_value']\n'active'  # Higher confidence (0.9 > 0.7)\n>>> result.result['resolution_method']\n'maximum_confidence'"
            },
            {
              "name": "validate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate a proposed value for this variable.\n\nThis default implementation provides basic validation:\n1. **Type checking**: If allowed_types configured, check value type\n2. **Range validation**: If min/max configured, check numeric bounds\n3. **Custom validators**: Apply any provided validation functions\n\nParameters\n----------\nvalue : Any\n    The value to validate\n\nReturns\n-------\nbool\n    True if value passes all validation checks, False otherwise\n\nNotes\n-----\nConfiguration via self.reasoning_config['validation']:\n- check_type: Whether to enforce type checking (default True)\n- allowed_types: List of allowed types (default None = any type)\n- check_range: Whether to check numeric ranges (default False)\n- min_value: Minimum allowed value (if numeric)\n- max_value: Maximum allowed value (if numeric)\n- custom_validators: List of callables taking (value, variable) -> bool\n\nThis is a basic validation method. For complex validation logic\n(e.g., dependent on other variables, temporal constraints), override\nin a subclass.\n\nExamples\n--------\n>>> # Configure temperature variable with validation\n>>> temp_var.reasoning_config['validation'] = {\n...     'check_type': True,\n...     'allowed_types': [int, float],\n...     'check_range': True,\n...     'min_value': 15.0,\n...     'max_value': 30.0\n... }\n>>> temp_var.validate(22.5)\nTrue\n>>> temp_var.validate(35.0)  # Above max\nFalse\n>>> temp_var.validate(\"hot\")  # Wrong type\nFalse\n\n>>> # Add custom validator\n>>> def not_too_rapid_change(value, variable):\n...     if variable.history:\n...         last_value = variable.history[-1][1]\n...         return abs(value - last_value) < 5.0\n...     return True\n>>> temp_var.reasoning_config['validation']['custom_validators'] = [not_too_rapid_change]"
            },
            {
              "name": "propose_actions",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                }
              ],
              "returns": "list[ActionProposal]",
              "decorators": [],
              "is_async": false,
              "docstring": "Generate action proposals based on reasoning outcomes.\n\nThis is the primary method Variables use to participate in CCES.\nIt triggers multiple reasoning pathways and converts reasoning\nresults into actionable proposals for the Executive.\n\nReasoning Pathways:\n------------------\n1. Anomaly Detection: Detect unexpected values and propose corrections\n2. Intervention Planning: Plan interventions to reach target values\n3. Constraint Pressure: Respond to system constraints/pressures\n\nParameters\n----------\ncontext : Optional[dict[str, Any]]\n    Current system context that may include:\n    - target_values: Dict[name -> target] for intervention planning\n    - constraints: System constraints affecting variable behavior\n    - resources: Available resources for proposed actions\n    - time: Current simulation/time context\n    Defaults to empty dict if None.\n\nReturns\n-------\nlist[ActionProposal]\n    List of proposals generated by this Variable. May be empty if\n    no actions are warranted given current reasoning and context.\n    Each proposal includes:\n    - variable_id/name: Identifies proposing Variable\n    - action_type: Type of action (\"correct_anomaly\", \"intervention\", etc.)\n    - target: What to act upon (often the Variable itself)\n    - parameters: Action-specific details\n    - justification: Natural language explanation\n    - confidence: Variable's confidence in proposal effectiveness\n\nNotes\n-----\n- Proposals are added to _proposal_history for tracking\n- Multiple proposals may be generated from different reasoning pathways\n- Empty list indicates Variable sees no need for action\n- Proposals are hypotheses, not commitments to act\n\nExamples\n--------\n>>> # Variable detects anomaly and proposes correction\n>>> temp_var.value = 30.0  # Unexpectedly high\n>>> proposals = temp_var.propose_actions()\n>>> len(proposals)\n1\n>>> proposal = proposals[0]\n>>> proposal.action_type\n'correct_anomaly'\n>>> 
proposal.target\n'RoomTemperature'\n\n>>> # Variable plans intervention to reach target\n>>> context = {\"target_values\": {\"RoomTemperature\": 21.0}}\n>>> proposals = temp_var.propose_actions(context)\n>>> len(proposals)\n2\n>>> proposal = proposals[0]\n>>> proposal.action_type\n'intervention'"
            },
            {
              "name": "_from_anomaly_detection",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "list[ActionProposal]",
              "decorators": [],
              "is_async": false,
              "docstring": "Generate proposals from anomaly detection reasoning.\n\nCalls detect_anomaly() and converts any detected anomalies into\ncorrection proposals.\n\nParameters\n----------\ncontext : dict[str, Any]\n    System context (may influence anomaly detection thresholds)\n\nReturns\n-------\nlist[ActionProposal]\n    List containing 0 or 1 proposals. Single proposal if anomaly\n    detected, empty list otherwise.\n\nNotes\n-----\n- Uses self.detect_anomaly() (default Z-score implementation)\n- Only generates proposals for significant anomalies\n- Proposal includes expected_value for correction"
            },
            {
              "name": "_from_intervention_planning",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "list[ActionProposal]",
              "decorators": [],
              "is_async": false,
              "docstring": "Generate proposals from intervention planning reasoning.\n\nPlans interventions to reach target values specified in context.\n\nParameters\n----------\ncontext : dict[str, Any]\n    Must contain \"target_values\" dict mapping variable names to\n    target values. Only plans intervention if this Variable's\n    name is in target_values.\n\nReturns\n-------\nlist[ActionProposal]\n    List containing 0 or 1 intervention proposals.\n\nNotes\n-----\n- Uses self.plan_intervention() from VariableReasoner\n- Only plans if target value differs from current value\n- Proposal includes target_value parameter for Executive"
            },
            {
              "name": "_from_constraint_pressure",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "list[ActionProposal]",
              "decorators": [],
              "is_async": false,
              "docstring": "Generate proposals from constraint/pressure reasoning.\n\n[PLACEHOLDER - To be implemented based on constraint system]\n\nParameters\n----------\ncontext : dict[str, Any]\n    System constraints and pressures\n\nReturns\n-------\nlist[ActionProposal]\n    Currently returns empty list. To be implemented when\n    constraint system is defined.\n\nNotes\n-----\n- Future implementation will respond to system constraints\n- May propose adjustments to relieve constraint violations"
            },
            {
              "name": "explain",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "mode",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "Any",
              "decorators": [],
              "is_async": false,
              "docstring": "Explain why the variable currently has its value and recent behavior.\n\nProvides transparency into Variable's state, reasoning, and proposals.\nEssential for debugging, monitoring, and trust in autonomous systems.\n\nParameters\n----------\nmode : str, default=\"text\"\n    Explanation format:\n    - \"dict\": Structured dictionary suitable for programmatic use\n    - \"text\": Human-readable narrative for logs/interfaces\n\nReturns\n-------\nAny\n    - If mode=\"dict\": Dict with structured explanation\n    - If mode=\"text\": String with human-readable explanation\n\nRaises\n------\nValueError\n    If mode is not \"dict\" or \"text\"\n\nExamples\n--------\n>>> # Get structured explanation\n>>> explanation = temp_var.explain(mode=\"dict\")\n>>> explanation.keys()\ndict_keys(['name', 'value', 'units', 'confidence', 'recent_reasoning', 'latest_proposal'])\n\n>>> # Get human-readable explanation\n>>> text = temp_var.explain(mode=\"text\")\n>>> print(text)\nRoomTemperature = 22.5 °C\nReasons:\n - Detected normal operating range\n - Planning intervention to 21.0\nLatest proposal: Planned intervention to reach 21.0"
            },
            {
              "name": "_format_text_explanation",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "explanation",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Format structured explanation as human-readable text.\n\nParameters\n----------\nexplanation : dict[str, Any]\n    Structured explanation from explain(mode=\"dict\")\n\nReturns\n-------\nstr\n    Human-readable narrative"
            },
            {
              "name": "get_proposal_history",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "limit",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                }
              ],
              "returns": "list[ActionProposal]",
              "decorators": [],
              "is_async": false,
              "docstring": "Get historical proposals generated by this Variable.\n\nParameters\n----------\nlimit : Optional[int]\n    Maximum number of recent proposals to return.\n    If None, returns all proposals.\n\nReturns\n-------\nlist[ActionProposal]\n    List of proposals, most recent last.\n\nExamples\n--------\n>>> # Get last 5 proposals\n>>> recent = temp_var.get_proposal_history(limit=5)\n>>> len(recent) <= 5\nTrue\n\n>>> # Get all proposals\n>>> all_proposals = temp_var.get_proposal_history()\n>>> len(all_proposals) == len(temp_var._proposal_history)\nTrue"
            },
            {
              "name": "clear_proposal_history",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Clear all proposal history.\n\nUseful for resetting Variable state or managing memory.\nDoes not affect current value or reasoning history.\n\nExamples\n--------\n>>> len(temp_var._proposal_history)\n10\n>>> temp_var.clear_proposal_history()\n>>> len(temp_var._proposal_history)\n0"
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        }
      ],
      "functions": [],
      "global_vars": [
        {
          "name": "logger",
          "annotation": null
        }
      ]
    },
    "dynexus/core/variable/base.py": {
      "path": "dynexus/core/variable/base.py",
      "module_docstring": "Base Variable class.\n\nDefines the fundamental Variable abstraction that represents a quantity with\nstate, uncertainty, constraints, and reasoning capabilities.",
      "imports": [
        {
          "module": "__future__",
          "name": "annotations",
          "alias": null,
          "level": 0
        },
        {
          "module": "abc",
          "name": "ABC",
          "alias": null,
          "level": 0
        },
        {
          "module": "abc",
          "name": "abstractmethod",
          "alias": null,
          "level": 0
        },
        {
          "module": "collections.abc",
          "name": "Callable",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "dataclass",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "field",
          "alias": null,
          "level": 0
        },
        {
          "module": "datetime",
          "name": "datetime",
          "alias": null,
          "level": 0
        },
        {
          "module": "datetime",
          "name": "timezone",
          "alias": null,
          "level": 0
        },
        {
          "module": "enum",
          "name": "Enum",
          "alias": null,
          "level": 0
        },
        {
          "module": "enum",
          "name": "auto",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "ClassVar",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "TypeVar",
          "alias": null,
          "level": 0
        },
        {
          "module": "utils.id",
          "name": "generate_id",
          "alias": null,
          "level": 3
        },
        {
          "module": "utils.logging",
          "name": "get_logger",
          "alias": null,
          "level": 3
        },
        {
          "module": "utils.mixin",
          "name": "ObservableMixin",
          "alias": null,
          "level": 3
        },
        {
          "module": "utils.mixin",
          "name": "SerializableMixin",
          "alias": null,
          "level": 3
        },
        {
          "module": "utils.mixin",
          "name": "StatefulMixin",
          "alias": null,
          "level": 3
        }
      ],
      "classes": [
        {
          "name": "VariableRole",
          "bases": [
            "Enum"
          ],
          "docstring": "Defines the role of a variable within the system.\n\nThe role determines how a variable participates in causal relationships,\nhow it's updated, and what constraints apply to it. This classification\nis fundamental for reasoning and intervention planning.\n\nAttributes\n----------\nENDOGENOUS : VariableRole\n    Internal variable determined by the system's mechanisms. These variables\n    evolve according to the system dynamics and are typically outputs of\n    mechanisms. Example: Room temperature in a climate control system.\n\nEXOGENOUS : VariableRole\n    External input variable that influences the system but is not determined\n    by it. These are typically boundary conditions or external drivers.\n    Example: Outdoor temperature affecting a building's thermal dynamics.\n\nINTERVENTION : VariableRole\n    Variable that can be directly manipulated by external interventions.\n    These are targets for optimization or control actions.\n    Example: Thermostat setpoint that can be adjusted.\n\nOBSERVATION : VariableRole\n    Variable that can be observed/measured but not directly manipulated.\n    These provide information about the system state but aren't control knobs.\n    Example: Sensor readings from installed equipment.\n\nLATENT : VariableRole\n    Unobserved variable that has influence on the system. These\n    represent hidden states or factors that must be inferred.\n    Example: Unmeasured internal stress in a mechanical component.\n\nRESOURCE : VariableRole\n    Variable representing a consumable or limited resource. 
These have\n    availability constraints and depletion dynamics.\n    Example: Battery charge level, fuel quantity, budget allocation.\n\nCONSTRAINT : VariableRole\n    Variable representing a system constraint or boundary condition.\n    These define limits that must not be violated.\n    Example: Maximum allowable pressure, minimum safety margin.\n\nNotes\n-----\nVariable roles are not mutually exclusive - a variable can have multiple\naspects (e.g., both ENDOGENOUS and OBSERVATION). However, the primary role\ndetermines its core behavior in reasoning.\n\nExamples\n--------\n>>> from dynexus import VariableRole\n>>>\n>>> # Define roles for different variables in a power grid\n>>> generator_output = VariableRole.ENDOGENOUS  # Determined by generator dynamics\n>>> wind_speed = VariableRole.EXOGENOUS         # External weather condition\n>>> demand_setpoint = VariableRole.INTERVENTION # Can be adjusted by operators\n>>> line_temperature = VariableRole.OBSERVATION # Measured but not controlled\n>>> equipment_wear = VariableRole.LATENT        # Unmeasured but affects reliability\n>>> fuel_supply = VariableRole.RESOURCE         # Consumable fuel stock\n>>> voltage_limit = VariableRole.CONSTRAINT     # System operational limit",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "VariableStatus",
          "bases": [
            "Enum"
          ],
          "docstring": "Represents the current operational status of a variable.\n\nThe status indicates the variable's state in the reasoning process,\nincluding whether its value is resolved, conflicted, or requires attention.\nThis is used for monitoring, debugging, and automated reasoning.\n\nAttributes\n----------\nINITIALIZED : VariableStatus\n    Variable has been created but no value has been set or processed.\n    This is the starting state for all new variables.\n\nRESOLVED : VariableStatus\n    Variable has a single, consistent value that satisfies all constraints.\n    This is the normal operational state for functioning variables.\n\nCONFLICTED : VariableStatus\n    Multiple sources propose conflicting values for the variable.\n    Requires conflict resolution before the variable can be used.\n    Example: Two sensors giving significantly different readings.\n\nUNCERTAIN : VariableStatus\n    Variable has high uncertainty or low confidence in its current value.\n    May indicate measurement noise, model uncertainty, or insufficient data.\n\nVIOLATED : VariableStatus\n    Variable's current value violates one or more constraints.\n    Requires corrective action or constraint relaxation.\n    Example: Temperature exceeding safe operating limits.\n\nINTERVENED : VariableStatus\n    Variable's value has been set by an external intervention.\n    This status helps track which variables have been manually controlled.\n\nOBSERVED : VariableStatus\n    Variable's value comes from direct observation/measurement.\n    Distinguished from model-predicted or inferred values.\n\nNotes\n-----\nStatus transitions are monitored by the system to trigger appropriate\nactions. 
For example, a VIOLATED status might trigger an alert or\nautomatic constraint enforcement.\n\nExamples\n--------\n>>> from dynexus import VariableStatus\n>>>\n>>> # Typical status progression\n>>> initial = VariableStatus.INITIALIZED  # New variable\n>>> measured = VariableStatus.OBSERVED    # Value from sensor\n>>> consistent = VariableStatus.RESOLVED  # Validated and consistent\n>>>\n>>> # Problem states requiring attention\n>>> conflicting_sources = VariableStatus.CONFLICTED\n>>> constraint_breach = VariableStatus.VIOLATED\n>>> low_confidence = VariableStatus.UNCERTAIN\n>>> manual_override = VariableStatus.INTERVENED",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "VariableMetadata",
          "bases": [],
          "docstring": "Comprehensive metadata container for variable properties and provenance.\n\nStores descriptive information, units, domain context, precision requirements,\ntimestamps, tags, and custom annotations. This metadata supports:\n- Documentation and understanding of variable purpose\n- Unit consistency checking\n- Domain-specific reasoning\n- Provenance tracking\n- Search and filtering by tags\n- Custom extensions via annotations\n\nParameters\n----------\nname : str\n    Human-readable name of the variable. Should be descriptive and unique\n    within its context. Example: \"reactor_core_temperature\"\n\ndescription : str, optional\n    Detailed description of what the variable represents, its purpose,\n    and any relevant context. Default: \"\" (empty string).\n\nunits : str, optional\n    Physical or logical units of measurement. Should follow standard\n    notation (e.g., \"meters\", \"kg/s\", \"USD\", \"percentage\").\n    Default: None (unitless).\n\ndomain : str, optional\n    Domain or context in which the variable operates. Helps with\n    domain-specific reasoning and validation.\n    Example: \"thermal\", \"financial\", \"biological\".\n    Default: None (generic).\n\nprecision : float, optional\n    Required numerical precision for comparisons and calculations.\n    Used for tolerance in equality checks and convergence criteria.\n    Default: 1e-6.\n\ncreated_at : datetime, optional\n    Timestamp when the variable was created. Automatically set to\n    current UTC time if not specified.\n\nupdated_at : datetime, optional\n    Timestamp when the variable metadata was last modified. Automatically\n    set to current UTC time if not specified.\n\ntags : set[str], optional\n    Set of tags for categorization and filtering. Tags are case-sensitive\n    and should use snake_case or kebab-case conventions.\n    Default: empty set.\n\nannotations : dict[str, Any], optional\n    Custom key-value pairs for extended metadata. 
Supports domain-specific\n    extensions without modifying the core structure.\n    Default: empty dictionary.\n\nAttributes\n----------\nname : str\n    See Parameters section.\n\ndescription : str\n    See Parameters section.\n\nunits : Optional[str]\n    See Parameters section.\n\ndomain : Optional[str]\n    See Parameters section.\n\nprecision : float\n    See Parameters section.\n\ncreated_at : datetime\n    See Parameters section.\n\nupdated_at : datetime\n    See Parameters section.\n\ntags : set[str]\n    See Parameters section.\n\nannotations : dict[str, Any]\n    See Parameters section.\n\nMethods\n-------\nupdate()\n    Update the last modified timestamp to current UTC time.\n\nadd_tag(tag: str)\n    Add a tag to the variable's tag set.\n\nadd_annotation(key: str, value: Any)\n    Add or update a custom annotation.\n\nExamples\n--------\n>>> from datetime import datetime\n>>> from dynexus import VariableMetadata\n>>>\n>>> # Basic metadata for a temperature variable\n>>> temp_metadata = VariableMetadata(\n...     name=\"ambient_temperature\",\n...     description=\"Ambient air temperature at sensor location\",\n...     units=\"Celsius\",\n...     domain=\"environmental\",\n...     precision=0.1,  # 0.1 degree precision\n...     tags={\"sensor\", \"environment\", \"monitoring\"}\n... )\n>>>\n>>> # Metadata with custom annotations\n>>> stock_metadata = VariableMetadata(\n...     name=\"stock_price_XYZ\",\n...     description=\"Closing price of XYZ Corporation\",\n...     units=\"USD\",\n...     domain=\"financial\",\n...     precision=0.01,\n...     annotations={\n...         \"ticker\": \"XYZ\",\n...         \"exchange\": \"NASDAQ\",\n...         \"sector\": \"technology\",\n...         \"data_source\": \"market_data_api_v2\"\n...     }\n... 
)\n>>>\n>>> # Modifying metadata\n>>> temp_metadata.add_tag(\"outdoor\")\n>>> temp_metadata.add_annotation(\"sensor_id\", \"temp_sensor_001\")\n>>> temp_metadata.update()  # Refresh updated_at timestamp\n>>>\n>>> print(f\"Tags: {temp_metadata.tags}\")\nTags: {'sensor', 'environment', 'monitoring', 'outdoor'}\n>>> print(f\"Last updated: {temp_metadata.updated_at}\")\nLast updated: 2025-12-10 10:28:53.454682",
          "methods": [
            {
              "name": "update",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Update the last modified timestamp to current UTC time.\n\nThis method should be called whenever any metadata field is modified\nto maintain accurate provenance tracking.\n\nExamples\n--------\n>>> import time\n>>> from dynexus import VariableMetadata\n>>> metadata = VariableMetadata(name=\"test\")\n>>> original_time = metadata.updated_at\n>>> time.sleep(0.001)\n>>> metadata.update()\n>>> assert metadata.updated_at > original_time"
            },
            {
              "name": "add_tag",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "tag",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Add a tag to the variable's tag set.\n\nTags are used for categorization, filtering, and search. Duplicate\ntags are automatically ignored (set semantics).\n\nParameters\n----------\ntag : str\n    Tag to add. Should be descriptive and follow naming conventions\n    (snake_case or kebab-case recommended).\n\nRaises\n------\nTypeError\n    If tag is not a string.\n\nExamples\n--------\n>>> from dynexus import VariableMetadata\n>>> metadata = VariableMetadata(name=\"test\")\n>>> metadata.add_tag(\"experimental\")\n>>> metadata.add_tag(\"high_priority\")\n>>> print(metadata.tags)\n{'experimental', 'high_priority'}\n>>>\n>>> # Duplicate tags are ignored\n>>> metadata.add_tag(\"experimental\")\n>>> print(len(metadata.tags))\n2"
            },
            {
              "name": "add_annotation",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "key",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Add or update a custom annotation.\n\nAnnotations allow for domain-specific metadata extensions without\nmodifying the core metadata structure. They are stored as key-value\npairs and can contain any serializable data.\n\nParameters\n----------\nkey : str\n    Annotation key. Should be descriptive and follow naming conventions\n    (snake_case recommended).\nvalue : Any\n    Annotation value. Must be JSON-serializable for proper serialization.\n\nRaises\n------\nTypeError\n    If key is not a string.\n\nNotes\n-----\nIf the key already exists, its value will be overwritten.\n\nExamples\n--------\n>>> from dynexus import VariableMetadata\n>>> metadata = VariableMetadata(name=\"test\")\n>>>\n>>> # Add various types of annotations\n>>> metadata.add_annotation(\"version\", \"1.2.3\")\n>>> metadata.add_annotation(\"calibration_date\", \"2024-01-15\")\n>>> metadata.add_annotation(\"allowed_range\", {\"min\": 0, \"max\": 100})\n>>>\n>>> print(metadata.annotations[\"version\"])\n1.2.3\n>>> print(metadata.annotations[\"allowed_range\"][\"min\"])\n0"
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "BaseVariable",
          "bases": [
            "ObservableMixin",
            "SerializableMixin",
            "StatefulMixin[dict[str, Any]]",
            "ABC"
          ],
          "docstring": "Base class for all Variables.\n\nA Variable represents a quantity with state, uncertainty, constraints,\nand reasoning capabilities. It serves as the output of mechanisms and can\nparticipate in causal relationships while maintaining consistency.\n\nAttributes\n----------\nmetadata : VariableMetadata\n    Metadata about the variable including name, description, units, etc.\nrole : VariableRole\n    Role of the variable in causal relationships.\nstatus : VariableStatus\n    Current status of the variable.\nvalue : Optional[Any]\n    Current value of the variable.\nuncertainty : float\n    Current uncertainty level (0=deterministic, 1=completely uncertain).\nconstraints : list[Callable]\n    List of constraint functions that must be satisfied.\ndependencies : set[str]\n    IDs of variables this variable depends on.\ndependents : set[str]\n    IDs of variables that depend on this variable.\nconfidence : float\n    Confidence in current value (0 to 1).\nhistory : list[tuple[datetime, Any, float]]\n    Historical values with timestamps and confidence.\n\nExamples\n--------\n>>> import numpy as np\n>>> from dynexus import BaseVariable, VariableRole\n>>>\n>>> class Temperature(BaseVariable):\n...     def validate(self, value: float) -> bool:\n...         return -273.15 <= value <= 1000.0  # Absolute zero to reasonable max\n...\n>>> temp = Temperature(\n...     name=\"room_temperature\",\n...     description=\"Ambient room temperature\",\n...     units=\"Celsius\",\n...     role=VariableRole.ENDOGENOUS\n... )\n>>> temp.update(22.5, confidence=0.95)\nTrue\n>>> print(f\"Value: {temp.value}°C, Confidence: {temp.confidence:.2f}\")\nValue: 22.5°C, Confidence: 0.95",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "name",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "description",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "units",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                },
                {
                  "name": "role",
                  "type": "normal",
                  "annotation": "VariableRole",
                  "default": null
                },
                {
                  "name": "initial_value",
                  "type": "normal",
                  "annotation": "Any | None",
                  "default": null
                },
                {
                  "name": "initial_confidence",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "config",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize a new Variable.\n\nParameters\n----------\nname : str\n    Unique name of the variable.\ndescription : str, optional\n    Human-readable description of the variable.\nunits : str, optional\n    Physical units of the variable (e.g., \"meters\", \"kg/s\").\nrole : VariableRole, optional\n    Role in causal relationships (default: ENDOGENOUS).\ninitial_value : Any, optional\n    Initial value of the variable.\ninitial_confidence : float, optional\n    Initial confidence in the value (default: 0.8).\nconfig : dict, optional\n    Configuration dictionary for the variable.\n\nRaises\n------\nValueError\n    If name is empty or confidence not in [0, 1].\n\nNotes\n-----\nThe following events can be subscribed for further analysis:\n    validation_failed, value_changed, constraint_added,\n    dependency_added, dependent_added, state_restored,\n    history_cleared, reset"
            },
            {
              "name": "touch",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Update the last modified timestamp."
            },
            {
              "name": "update",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                },
                {
                  "name": "confidence",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "source",
                  "type": "normal",
                  "annotation": "str | None",
                  "default": null
                },
                {
                  "name": "timestamp",
                  "type": "normal",
                  "annotation": "datetime | None",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Update the variable's value with validation.\n\nParameters\n----------\nvalue : Any\n    New value for the variable.\nconfidence : float, optional\n    Confidence in the new value (default: 0.8).\nsource : str, optional\n    Source of the update (e.g., mechanism id).\ntimestamp : datetime, optional\n    Timestamp for the update (default: current time).\n\nReturns\n-------\nbool\n    True if update was successful, False if validation failed.\n\nRaises\n------\nValueError\n    If confidence is not in [0, 1].\n\nExamples\n--------\n>>> from dynexus import BaseVariable, VariableRole\n>>> class TempVariable(BaseVariable):\n...     def validate(self, value: float) -> bool:\n...         return 0. <= value <= 100.\n>>> var = TempVariable(name=\"test\", role=VariableRole.ENDOGENOUS)\n>>> var.update(42.0, confidence=0.9)\nTrue\n>>> var.update(-1, confidence=0.5)  # Assuming a constraint prevents negative values\nValidation failed for Variable test: value=-1\nFalse"
            },
            {
              "name": "add_constraint",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "constraint",
                  "type": "normal",
                  "annotation": "Callable[[Any], bool]",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Add a constraint function that must be satisfied by the Variable's value.\n\nParameters\n----------\nconstraint : Callable[[Any], bool]\n    Function that takes a value and returns True if constraint is satisfied.\n\nExamples\n--------\n>>> from dynexus import BaseVariable, VariableRole\n>>> class AgeVariable(BaseVariable):\n...     def validate(self, value: int) -> bool:\n...         return value <= 100\n>>> var = AgeVariable(name=\"age\", role=VariableRole.ENDOGENOUS)\n>>> var.add_constraint(lambda x: x >= 0)\n>>> var.update(45)\nTrue\n>>> var.update(-5)\nFalse"
            },
            {
              "name": "add_validator",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "validator",
                  "type": "normal",
                  "annotation": "Callable[[Any], bool]",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Add a validator function for value validation.\n\nParameters\n----------\nvalidator : Callable[[Any], bool]\n    Function that validates a value."
            },
            {
              "name": "add_dependency",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "variable_id",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Add a dependency on another Variable.\n\nParameters\n----------\nvariable_id : str\n    ID of the variable this variable depends on."
            },
            {
              "name": "add_dependent",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "variable_id",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Add a variable that depends on this one.\n\nParameters\n----------\nvariable_id : str\n    ID of the variable that depends on this variable."
            },
            {
              "name": "to_dict",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Convert variable to dictionary representation.\n\nReturns\n-------\ndict[str, Any]\n    Dictionary containing variable state and metadata."
            },
            {
              "name": "get_state",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "last_entries",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Get the current state of the variable.\n\nReturns\n-------\ndict[str, Any]\n    Current variable state."
            },
            {
              "name": "set_state",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "state",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Set the variable state.\n\nParameters\n----------\nstate : dict\n    State to restore."
            },
            {
              "name": "get_history",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "n",
                  "type": "normal",
                  "annotation": "int | None",
                  "default": null
                },
                {
                  "name": "since",
                  "type": "normal",
                  "annotation": "datetime | None",
                  "default": null
                }
              ],
              "returns": "list[tuple[datetime, Any | None, float]]",
              "decorators": [],
              "is_async": false,
              "docstring": "Get historical values of the variable.\n\nParameters\n----------\nn : int, optional\n    Number of most recent entries to return.\nsince : datetime, optional\n    Only return entries since this timestamp.\n\nReturns\n-------\nlist[tuple[datetime, Optional[Any], float]]\n    Historical values with timestamps and confidence."
            },
            {
              "name": "clear_history",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Clear the variable's history."
            },
            {
              "name": "reset",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Reset the variable to its initial state.\n\nThis clears the value, history, and resets status to INITIALIZED.\nDependencies and constraints are preserved."
            },
            {
              "name": "validate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [
                "abstractmethod"
              ],
              "is_async": false,
              "docstring": "Abstract method for value validation.\n\nSubclasses must implement domain-specific validation logic.\n\nParameters\n----------\nvalue : Any\n    Value to validate.\n\nReturns\n-------\nbool\n    True if value is valid for this variable."
            },
            {
              "name": "_validate_value",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Private validation pipeline.\n\nParameters\n----------\nvalue : Any\n    Value to validate.\n\nReturns\n-------\nbool\n    True if value passes all validations and constraints."
            },
            {
              "name": "__str__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Representation of the variable."
            },
            {
              "name": "__repr__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Detailed representation of the variable."
            }
          ],
          "class_methods": [
            {
              "name": "from_dict",
              "args": [
                {
                  "name": "cls",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "data",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "BaseVariable",
              "decorators": [
                "classmethod"
              ],
              "is_async": false,
              "docstring": "Create a variable from dictionary representation.\n\nParameters\n----------\ndata : dict[str, Any]\n    Dictionary containing variable data.\n\nReturns\n-------\nBaseVariable\n    Reconstructed variable.\n\nNotes\n-----\nThis is a simplified reconstruction. Some runtime state may not be fully restored."
            }
          ],
          "static_methods": [],
          "properties": [
            {
              "name": "id",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [
                "property"
              ],
              "is_async": false,
              "docstring": "Get the variable's unique identifier."
            },
            {
              "name": "created_at",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "datetime",
              "decorators": [
                "property"
              ],
              "is_async": false,
              "docstring": "Get the creation timestamp."
            },
            {
              "name": "updated_at",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "datetime",
              "decorators": [
                "property"
              ],
              "is_async": false,
              "docstring": "Get the last update timestamp."
            },
            {
              "name": "value",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "Any | None",
              "decorators": [
                "property"
              ],
              "is_async": false,
              "docstring": "Get the current value of the variable.\n\nReturns\n-------\nOptional[Any]\n    Current value, or None if not set."
            },
            {
              "name": "name",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [
                "property"
              ],
              "is_async": false,
              "docstring": "Get the variable name."
            },
            {
              "name": "units",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str | None",
              "decorators": [
                "property"
              ],
              "is_async": false,
              "docstring": "Get the variable units."
            },
            {
              "name": "description",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [
                "property"
              ],
              "is_async": false,
              "docstring": "Get the variable description."
            }
          ]
        }
      ],
      "functions": [],
      "global_vars": [
        {
          "name": "logger",
          "annotation": null
        },
        {
          "name": "T",
          "annotation": null
        },
        {
          "name": "VariableType",
          "annotation": null
        }
      ]
    },
    "dynexus/core/action/effect.py": {
      "path": "dynexus/core/action/effect.py",
      "module_docstring": "Action execution effects and learning signals.\n\nThe ActionEffect system captures the *actual outcomes* of executed proposals\nto enable learning and system adaptation. Effects serve as the ground\ntruth that validates or refutes Variable-generated hypotheses.\n\nKey Concepts:\n-------------\n- **ActionEffect**: Records what actually happened when a proposal was executed\n- **Learning Signal**: Formatted data for training models\n- **Outcome Delta**: Difference between expected and actual outcomes\n- **Resource Efficiency**: Measure of resource consumption relative to benefit\n\nCCES Learning Loop:\n-------------------\n1. VARIABLE → Proposes action with expected outcome\n2. EXECUTIVE → Executes action, creates ActionEffect\n3. LEARNING → Uses learning_signal() to update models\n4. VARIABLE → Adjusted based on past effect accuracy\n\nThis creates a feedback loop where Variables learn from their proposal accuracy\nand resource efficiency, improving future proposals.",
      "imports": [
        {
          "module": "dataclasses",
          "name": "dataclass",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "field",
          "alias": null,
          "level": 0
        },
        {
          "module": "datetime",
          "name": "datetime",
          "alias": null,
          "level": 0
        },
        {
          "module": "datetime",
          "name": "timezone",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "utils.types",
          "name": "IntOrFloat",
          "alias": null,
          "level": 3
        }
      ],
      "classes": [
        {
          "name": "ActionEffect",
          "bases": [],
          "docstring": "Outcome of executing an ActionProposal.\n\nCaptures the ground truth of what happened when an Executive executed\na Variable's proposal. Effects are essential for:\n\n1. **Learning**: Did the action produce the expected effect?\n2. **Confidence Calibration**: Should the Variable adjust its confidence scoring?\n3. **Policy Evaluation**: How effective was this type of intervention?\n4. **Resource Optimization**: Was the resource consumption justified?\n\nEffects are immutable records of execution history. They serve as the\nfactual basis for learning, unlike proposals which are hypotheses.\n\nAttributes\n----------\nproposal_id : str\n    ID of the ActionProposal that was executed. Links effect back to\n    the originating Variable and reasoning context.\nsuccess : bool\n    Whether the execution completed without errors. A False value\n    indicates either execution failure or violation of expectations.\ntimestamp : datetime\n    When the effect was recorded (UTC). Used for temporal analysis\n    and learning windowing.\nactual_outcome : Any\n    The measured outcome after execution. Type depends on action:\n    - For interventions: new state/value\n    - For adjustments: delta from previous state\n    - For queries: retrieved information\nexpected_outcome : Any\n    The outcome predicted by the proposing Variable. Used to compute\n    accuracy metrics and update Variable reasoning models.\nside_effects : Dict[str, Any]\n    Unintended or secondary consequences of the action. Key-value pairs\n    where keys describe the effect type and values quantify it.\n    Example: {\"energy_spike\": 15.2, \"noise_increase\": \"3dB\"}\nexecution_time_ms : float\n    Time taken to execute the action in milliseconds. Measures\n    computational/real-world time cost.\nresource_consumed : Dict[str, float]\n    Resources consumed during execution. Keys are resource types\n    (e.g., \"cpu\", \"energy\", \"bandwidth\"), values are amounts.\nerror_message : Optional[str]\n    If success=False, describes what went wrong. None for successful\n    executions or when failure reason is unknown.\n\nExamples\n--------\n>>> # Successful temperature adjustment\n>>> from dynexus import ActionEffect\n>>> effect = ActionEffect(\n...     proposal_id=\"proposal_abc123\",\n...     success=True,\n...     actual_outcome=22.1,\n...     expected_outcome=22.0,\n...     side_effects={\"energy_used\": 45.2},\n...     execution_time_ms=125.5,\n...     resource_consumed={\"power\": 45.2, \"cpu\": 0.15}\n... )\n>>> effect.learning_signal()\n{\n    'proposal_id': 'proposal_abc123',\n    'success': True,\n    'outcome_delta': 0.1,\n    'resource_efficiency': 0.022\n}\n\n>>> # Failed execution\n>>> failed_effect = ActionEffect(\n...     proposal_id=\"proposal_xyz789\",\n...     success=False,\n...     error_message=\"Resource constraint violation\",\n...     execution_time_ms=50.0,\n...     resource_consumed={\"power\": 10.0}\n... )",
          "methods": [
            {
              "name": "learning_signal",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Convert effect into a learning-compatible signal.\n\nTransforms raw execution data into a structured format suitable for\nmachine learning models, policy evaluation, and confidence calibration.\nThe learning signal focuses on *action efficacy* and *resource efficiency*.\n\nReturns\n-------\ndict[str, Any]\n    Learning signal with keys:\n    - proposal_id: Identifier for tracing\n    - success: Binary success indicator\n    - outcome_delta: Difference between actual and expected (None if not computable)\n    - resource_efficiency: Efficiency score (higher = more efficient)\n\nNotes\n-----\n- Used by Learning module to update Variable reasoning models\n- Signals from multiple effects are aggregated for statistical learning\n- Resource efficiency uses reciprocal scaling: 1/(1+total_resources)\n  This gives 1.0 for zero consumption, approaching 0 for high consumption\n\nExamples\n--------\n>>> from dynexus import ActionEffect\n>>> effect = ActionEffect(\n...     proposal_id=\"prop1\",\n...     success=True,\n...     actual_outcome=10.5,\n...     expected_outcome=10.0,\n...     resource_consumed={\"cpu\": 0.2, \"memory\": 0.1}\n... )\n>>> signal = effect.learning_signal()\n>>> signal[\"outcome_delta\"]\n0.5\n>>> 0 < signal[\"resource_efficiency\"] < 1\nTrue"
            },
            {
              "name": "_outcome_delta",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "IntOrFloat | None",
              "decorators": [],
              "is_async": false,
              "docstring": "Compute difference between actual and expected outcomes.\n\nAttempts to compute: actual_outcome - expected_outcome\n\nReturns\n-------\nOptional[float]\n    Numerical difference if both outcomes are numeric and subtraction\n    is defined, otherwise None.\n\nNotes\n-----\n- Used to measure prediction accuracy\n- Positive delta means actual > expected (over-effect)\n- Negative delta means actual < expected (under-effect)\n- None indicates non-numeric outcomes or missing data\n- Exception-safe: returns None if subtraction fails"
            },
            {
              "name": "_resource_efficiency",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [],
              "is_async": false,
              "docstring": "Calculate efficiency score based on resource consumption.\n\nUses formula: efficiency = 1 / (1 + total_resources)\n\nProperties:\n- Score ∈ (0, 1]\n- 1.0 for zero resource consumption\n- Approaches 0 as consumption → ∞\n- Monotonically decreasing with consumption\n\nReturns\n-------\nfloat\n    Efficiency score between 0 and 1.\n\nNotes\n-----\n- Assumes all resources have equal weight (unitless aggregation)\n- In production, consider weighted sum based on resource scarcity\n- Add 1 to denominator to avoid division by zero and bound score"
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        }
      ],
      "functions": [],
      "global_vars": []
    },
    "dynexus/core/action/policy.py": {
      "path": "dynexus/core/action/policy.py",
      "module_docstring": "Proposal selection policies.\n\nPolicies belong to the Optimizer layer and are responsible for comparing\nproposals across Variables, objectives, and resources. They implement the\n\"select\" phase of the CCES pipeline:\n\nCCES Pipeline:\n--------------\n1. VARIABLE → Proposes actions\n2. RUNTIME → Filters based on constraints\n3. OPTIMIZER → Selects optimal proposal using SelectionPolicy\n4. EXECUTIVE → Applies selected action\n\nPolicy Design Principles:\n-------------------------\n- Policies are stateless functions of proposals and context\n- Multiple policies can be composed or switched dynamically\n- Policies balance exploration vs. exploitation\n- Resource-awareness is fundamental, not optional",
      "imports": [
        {
          "module": "abc",
          "name": "ABC",
          "alias": null,
          "level": 0
        },
        {
          "module": "abc",
          "name": "abstractmethod",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "proposal",
          "name": "ActionProposal",
          "alias": null,
          "level": 1
        }
      ],
      "classes": [
        {
          "name": "SelectionPolicy",
          "bases": [
            "ABC"
          ],
          "docstring": "Abstract base class for proposal selection policies.\n\nDefines the interface for all Optimizer selection strategies. Concrete\npolicies implement specific algorithms for choosing among validated\nproposals based on system objectives, resource constraints, and\nlearning priorities.\n\nKey Responsibilities:\n---------------------\n1. Evaluate multiple ActionProposals\n2. Consider current system context\n3. Apply domain-specific selection criteria\n4. Return the single best proposal (or None)\n\nImplementation Notes:\n--------------------\n- Policies should be deterministic for reproducibility\n- Context should include resource availability, system state, and learning goals\n- Returning None indicates no proposal is suitable given current context\n\nExamples\n--------\n>>> from dynexus import SelectionPolicy\n>>> class ConfidenceThresholdPolicy(SelectionPolicy):\n...     def select(self, proposals, context):\n...         threshold = context.get(\"min_confidence\", 0.7)\n...         qualified = [p for p in proposals if p.confidence >= threshold]\n...         return qualified[0] if qualified else None\n\n>>> # Usage in Optimizer\n>>> policy = ConfidenceThresholdPolicy()\n>>> selected = policy.select(validated_proposals, system_context)",
          "methods": [
            {
              "name": "select",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposals",
                  "type": "normal",
                  "annotation": "list[ActionProposal]",
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "ActionProposal | None",
              "decorators": [
                "abstractmethod"
              ],
              "is_async": false,
              "docstring": "Select the optimal proposal from a list of validated candidates.\n\nParameters\n----------\nproposals : list[ActionProposal]\n    List of proposals that have passed Runtime validation.\n    Guaranteed to be non-empty (caller handles empty case).\n    Proposals are mutable but should not be modified by policy.\n\ncontext : dict[str, Any]\n    Current system state and optimization context. Typically includes:\n    - Resource availability (e.g., {\"cpu\": 0.3, \"memory\": 0.6})\n    - System objectives (e.g., {\"goal\": \"efficiency\", \"mode\": \"normal\"})\n    - Learning phase (e.g., {\"exploration_rate\": 0.1})\n    - Temporal constraints (e.g., {\"time_budget_ms\": 100})\n\nReturns\n-------\nOptional[ActionProposal]\n    The selected proposal, or None if no proposal is suitable\n    given the current context and policy criteria.\n\nRaises\n------\nNotImplementedError\n    Must be implemented by concrete subclasses.\n\nNotes\n-----\n- The method should be efficient; selection occurs in real-time\n- Context dictionary structure is policy-dependent\n- Policies may log their selection rationale for audit trails"
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "MultiObjectivePolicy",
          "bases": [
            "SelectionPolicy"
          ],
          "docstring": "Selects proposals using a weighted multi-objective score.\n\nImplements a linear weighted sum of normalized objectives:\n    score = w1*confidence + w2*impact + w3*urgency - w4*cost\n\nObjectives (extracted from proposal parameters):\n- Confidence: Variable's self-assessed prediction accuracy [0.0, 1.0]\n- Estimated Impact: Magnitude of expected effect\n- Local Urgency: Time-sensitive or priority-adjusted importance\n- Resource Cost: Anticipated resource consumption\n\nDefault Weights:\n----------------\n- Confidence: 0.4 (highest weight - trust in Variable's reasoning)\n- Impact: 0.3 (substantial effects preferred)\n- Urgency: 0.2 (time-sensitive actions prioritized)\n- Cost: 0.1 (resource efficiency secondary to effectiveness)\n\nDesign Philosophy:\n------------------\n\"Better to execute a high-confidence, moderate-impact action now\nthan wait for perfect information.\" Favors decisive action with\nreasonable certainty over extreme optimization.\n\nAttributes\n----------\nweights : dict[str, float]\n    Mapping from objective names to weights. Can be overridden\n    by subclasses or via context parameter.\n\nExamples\n--------\n>>> from dynexus import MultiObjectivePolicy, ActionProposal\n>>> policy = MultiObjectivePolicy()\n>>> proposals = [\n...     ActionProposal(confidence=0.8, parameters={\"estimated_impact\": 2.0}),\n...     ActionProposal(confidence=0.9, parameters={\"estimated_impact\": 1.0})\n... ]\n>>> context = {\"selection_mode\": \"balanced\"}\n>>> selected = policy.select(proposals, context)\n>>> selected.confidence\n0.8\n\n>>> # Custom weights via subclass\n>>> class EfficiencyFirstPolicy(MultiObjectivePolicy):\n...     def __init__(self):\n...         self.weights = {\"confidence\": 0.2, \"impact\": 0.2,\n...                        \"urgency\": 0.1, \"cost\": 0.5}",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Initialize with default objective weights."
            },
            {
              "name": "select",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposals",
                  "type": "normal",
                  "annotation": "list[ActionProposal]",
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "ActionProposal | None",
              "decorators": [],
              "is_async": false,
              "docstring": "Select proposal maximizing weighted multi-objective score.\n\nScoring Formula:\n---------------\nscore = (w_confidence * confidence)\n      + (w_impact * estimated_impact)\n      + (w_urgency * urgency)\n      - (w_cost * resource_cost)\n\nWhere:\n- confidence: From proposal.confidence [0.0, 1.0]\n- estimated_impact: From parameters[\"estimated_impact\"] (default 0.0)\n- urgency: From parameters[\"local_urgency\"] or confidence (default)\n- resource_cost: From parameters[\"resource_cost\"] (default 0.0)\n\nParameters\n----------\nproposals : list[ActionProposal]\n    Validated proposals from Runtime. Each must have confidence\n    and may have optional parameters for objectives.\n\ncontext : dict[str, Any]\n    May contain \"policy_weights\" to override default weights.\n    Example: {\"policy_weights\": {\"confidence\": 0.5, \"cost\": 0.3}}\n\nReturns\n-------\nOptional[ActionProposal]\n    Proposal with highest score, or None if proposals list is empty.\n\nNotes\n-----\n- Objectives are not normalized; assumes similar scales\n- Negative scores are possible with high resource costs\n- Ties are broken by first occurrence (deterministic)\n- Missing parameters use sensible defaults\n\nExamples\n--------\n>>> from dynexus import ActionProposal, MultiObjectivePolicy\n>>> proposal1 = ActionProposal(\n...     confidence=0.8,\n...     parameters={\"estimated_impact\": 2.0, \"resource_cost\": 1.0}\n... )\n>>> proposal2 = ActionProposal(\n...     confidence=0.6,\n...     parameters={\"estimated_impact\": 3.0, \"local_urgency\": 1.5}\n... )\n>>> policy = MultiObjectivePolicy()\n>>> selected = policy.select([proposal1, proposal2], {})\n>>> # proposal1 score: 0.4*0.8 + 0.3*2.0 - 0.1*1.0 = 0.32 + 0.6 - 0.1 = 0.82\n>>> # proposal2 score: 0.4*0.6 + 0.3*3.0 + 0.2*1.5 = 0.24 + 0.9 + 0.3 = 1.44\n>>> selected == proposal2  # Higher score wins\nTrue"
            },
            {
              "name": "explain_selection",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposals",
                  "type": "normal",
                  "annotation": "list[ActionProposal]",
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Generate explanation of selection decision.\n\nUseful for debugging, audit trails, and learning why certain\nproposals were selected over others.\n\nParameters\n----------\nproposals : list[ActionProposal]\n    Same proposals passed to select().\ncontext : dict[str, Any]\n    Same context passed to select().\n\nReturns\n-------\ndict[str, Any]\n    Explanation including:\n    - scores: List of (proposal_id, score) pairs\n    - weights: Effective weights used\n    - winner: ID of selected proposal\n    - rationale: Text explanation\n\nExamples\n--------\n>>> explanation = policy.explain_selection(proposals, context)\n>>> explanation[\"weights\"]\n{'confidence': 0.4, 'impact': 0.3, 'urgency': 0.2, 'cost': 0.1}"
            },
            {
              "name": "_select_optimal_proposal",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposals",
                  "type": "normal",
                  "annotation": "list[ActionProposal]",
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "tuple[dict[str, float], ActionProposal] | None",
              "decorators": [],
              "is_async": false,
              "docstring": "Select proposal maximizing weighted multi-objective score."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        }
      ],
      "functions": [],
      "global_vars": []
    },
    "dynexus/core/action/validator.py": {
      "path": "dynexus/core/action/validator.py",
      "module_docstring": "Action Proposal Validator.\n\nThe Validator is responsible for determining whether an ActionProposal\nis *admissible* under the current system state, contracts, and execution context.\n\nValidation is the second phase of the CCES pipeline:\n----------------------------------------------------\n1. VARIABLE → Proposes actions\n2. RUNTIME → Validates proposals (this module)\n3. OPTIMIZER → Selects optimal proposal\n4. EXECUTIVE → Applies selected action\n\nValidation Principles:\n---------------------\n- Deterministic: Same input always produces same validation result\n- Side-effect free: Validation never modifies system state\n- Non-optimizing: Only checks feasibility, doesn't rank or optimize\n- Fail-fast: First violation terminates validation (returns False)\n\nRejected proposals do not mutate the system and can be safely discarded\nor analyzed for learning purposes.",
      "imports": [
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "proposal",
          "name": "ActionProposal",
          "alias": null,
          "level": 1
        },
        {
          "module": "proposal",
          "name": "ProposalStatus",
          "alias": null,
          "level": 1
        }
      ],
      "classes": [
        {
          "name": "ValidationError",
          "bases": [
            "Exception"
          ],
          "docstring": "Raise exception when a proposal fails validation with critical errors.\n\nThis exception is typically caught by the Runtime to handle\nvalidation failures gracefully, but can be raised for programmatic\nvalidation checks that require immediate failure.\n\nAttributes\n----------\nproposal_id : str\n    ID of the proposal that failed validation\nerrors : List[str]\n    list of validation error messages\ncontext_summary : dict[str, Any]\n    Summary of validation context for debugging",
          "methods": [
            {
              "name": "__init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposal_id",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                },
                {
                  "name": "errors",
                  "type": "normal",
                  "annotation": "list[str]",
                  "default": null
                },
                {
                  "name": "context_summary",
                  "type": "normal",
                  "annotation": "dict[str, Any] | None",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Raise exception when a proposal fails validation with critical errors.\n\nParameters\n----------\nproposal_id : str\n    ID of the proposal that failed validation\nerrors : List[str]\n    list of validation error messages\ncontext_summary : dict[str, Any]\n    Summary of validation context for debugging\n\nExamples\n--------\n>>> from dynexus import ProposalValidator, ValidationError, ActionProposal\n>>> validator = ProposalValidator()\n>>> proposal, context = ActionProposal(), {}\n>>> try:\n...     if not validator.validate(proposal, context):\n...         raise ValidationError(\n...             proposal.id,\n...             proposal.validation_errors,\n...             {\"state\": context.get(\"system_state\")}\n...         )\n... except ValidationError as e:\n...     print(f\"Proposal {e.proposal_id} failed: {e.errors}\")\nProposal proposal_1766862159551_1_0531e14d failed: ['Missing action_type', 'Missing action target']"
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "ProposalValidator",
          "bases": [],
          "docstring": "Validates ActionProposals against CCES semantic rules.\n\nThe validator enforces system-wide consistency and feasibility before\nproposals proceed to the Optimizer for selection. It implements a\nmulti-stage validation pipeline with fail-fast semantics.\n\nValidation Stages:\n------------------\n1. Basic Integrity: Proposal structure and field validity\n2. Action Type: Recognized action types in current context\n3. Intervenability: Target can be causally intervened upon\n4. Contracts: Compliance with system contracts and constraints\n5. Resource Feasibility: Required resources are available\n6. Temporal Consistency: Time semantics are respected\n7. Conflict Detection: No conflicts with active proposals\n\nEach stage adds errors to `proposal.validation_errors` and any\nsingle failure causes overall validation to fail.\n\nAttributes\n----------\nNone (stateless by design)\n\nExamples\n--------\n>>> from datetime import datetime, timezone\n>>> from dynexus import ProposalValidator, ActionProposal\n>>> validator = ProposalValidator()\n>>> proposal = ActionProposal(\n...     action_type=\"adjust\",\n...     target=\"temperature\",\n...     confidence=0.8\n... )\n>>> context = {\n...     \"allowed_action_types\": {\"adjust\", \"set\", \"query\"},\n...     \"intervenable_targets\": {\"temperature\", \"pressure\"},\n...     \"resources\": {\"energy\": 100.0},\n...     \"time\": datetime.now(timezone.utc)\n... }\n>>> is_valid = validator.validate(proposal, context)\n>>> if is_valid:\n...     print(f\"Proposal {proposal.id} validated\")\n... else:\n...     print(f\"Validation errors: {proposal.validation_errors}\")\nProposal proposal_1766864109619_1_8703dabb validated",
          "methods": [
            {
              "name": "validate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposal",
                  "type": "normal",
                  "annotation": "ActionProposal",
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate a proposal against the current system context.\n\nExecutes the full validation pipeline and updates the proposal's\nstatus to either VALIDATED or REJECTED. Clears any previous\nvalidation errors before starting.\n\nParameters\n----------\nproposal : ActionProposal\n    The proposal to validate. Must be in PROPOSED status.\n    The proposal's validation_errors list will be cleared and\n    repopulated with any validation failures.\n\ncontext : dict[str, Any]\n    Execution context including:\n    - state: Current system state variables\n    - contracts: List of active Contract objects\n    - resources: Available resources dict\n    - time: Current simulation/system time\n    - allowed_action_types: Set of recognized action types\n    - intervenable_targets: Set of causally intervenable targets\n    - active_proposals: List of currently active proposals\n\nReturns\n-------\nbool\n    True if proposal passes all validation stages (admissible).\n    False if any validation stage fails (inadmissible).\n\nNotes\n-----\n- Validation is idempotent: multiple calls with same inputs produce same result\n- Context dict may contain additional keys; unrecognized keys are ignored\n- Proposal is modified in-place (status and validation_errors updated)\n- Always call validation_errors.clear() at start to ensure clean state\n\nRaises\n------\nValueError\n    If proposal is not in PROPOSED status (only PROPOSED can be validated)\n\nExamples\n--------\n>>> from datetime import datetime, timezone\n>>> from dynexus import ActionProposal, ProposalValidator\n>>> # Successful validation\n>>> proposal = ActionProposal(action_type=\"set\", target=\"var1\", confidence=0.9)\n>>> context = {\n...     \"allowed_action_types\": {\"set\"},\n...     \"intervenable_targets\": {\"var1\"},\n...     \"resources\": {},\n...     \"time\": datetime.now(timezone.utc)\n... 
}\n>>> validator = ProposalValidator()\n>>> validator.validate(proposal, context)\nTrue\n>>> proposal.status\n<ProposalStatus.VALIDATED: 'validated'>\n\n>>> # Failed validation\n>>> bad_proposal = ActionProposal(action_type=\"\", target=None, confidence=1.5)\n>>> validator.validate(bad_proposal, {})\nFalse\n>>> bad_proposal.validation_errors\n['Missing action_type', 'Missing action target', 'Confidence must be in [0, 1]']"
            },
            {
              "name": "_check_basic_integrity",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposal",
                  "type": "normal",
                  "annotation": "ActionProposal",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Ensure proposal is well-formed and has required fields.\n\nChecks:\n- action_type is non-empty string\n- target is not None\n- confidence is in valid range [0.0, 1.0]\n\nParameters\n----------\nproposal : ActionProposal\n    Proposal to check\n\nNotes\n-----\nThis is the first validation stage because it checks fundamental\nstructural requirements. If a proposal fails here, it's malformed\nand shouldn't proceed to more complex validation stages."
            },
            {
              "name": "_check_action_type",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposal",
                  "type": "normal",
                  "annotation": "ActionProposal",
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Ensure action type is recognized by the system.\n\nChecks if proposal.action_type is in the set of allowed action types\nfor the current context. If context contains no allowed_action_types,\nall action types are permitted (dangerous but flexible).\n\nParameters\n----------\nproposal : ActionProposal\n    Proposal to check\ncontext : dict[str, Any]\n    Must contain 'allowed_action_types' key with a set[str] value\n\nExamples\n--------\n>>> context = {\"allowed_action_types\": {\"adjust\", \"set\", \"query\"}}\n>>> # These would pass:\n>>> proposal1.action_type = \"adjust\"  # In set\n>>> proposal2.action_type = \"set\"     # In set\n>>> # This would fail:\n>>> proposal3.action_type = \"delete\"  # Not in set"
            },
            {
              "name": "_check_intervenability",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposal",
                  "type": "normal",
                  "annotation": "ActionProposal",
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Ensure the target of the proposal is intervenable.\n\nIn causal reasoning, not all variables can be directly intervened upon.\nThis check ensures the proposal targets something that accepts causal\ninterventions in the current system context.\n\nParameters\n----------\nproposal : ActionProposal\n    Proposal to check\ncontext : dict[str, Any]\n    Must contain 'intervenable_targets' key with a set[Any] value\n\nNotes\n-----\n- Intervenability is context-dependent (e.g., some variables may become\n  non-intervenable during certain system modes)\n- If context contains no intervenable_targets, all targets are assumed\n  intervenable (permissive mode)\n\nExamples\n--------\n>>> context = {\"intervenable_targets\": {\"temperature\", \"pressure\"}}\n>>> # These would pass:\n>>> proposal1.target = \"temperature\"  # In set\n>>> # These would fail:\n>>> proposal2.target = \"latent_var\"   # Not in set"
            },
            {
              "name": "_check_contracts",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposal",
                  "type": "normal",
                  "annotation": "ActionProposal",
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Check proposal-specific constraints and global contracts.\n\nValidates two types of constraints:\n1. Proposal-local constraints (defined by the proposing Variable)\n2. Global system contracts (applied to all proposals)\n\nParameters\n----------\nproposal : ActionProposal\n    Proposal with constraints to check\ncontext : dict[str, Any]\n    Must contain 'contracts' key with List[Contract] value\n\nNotes\n-----\n- Proposal constraints are Callable[[dict], bool] functions\n- Global contracts must have 'allows(proposal, context)' method\n- Exceptions in constraint evaluation are caught and logged as errors\n- This is where domain-specific business logic is enforced"
            },
            {
              "name": "_check_resource_feasibility",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposal",
                  "type": "normal",
                  "annotation": "ActionProposal",
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Ensure required resources are available.\n\nChecks if the proposal's resource requirements (specified in\nparameters[\"resource_cost\"]) can be satisfied by available resources.\n\nParameters\n----------\nproposal : ActionProposal\n    Proposal with resource requirements\ncontext : dict[str, Any]\n    Must contain 'resources' key with dict[str, float] value\n\nNotes\n-----\n- Resource format: {\"cpu\": 0.5, \"memory\": 0.2, \"energy\": 10.0}\n- Missing resource types are assumed to have zero availability"
            },
            {
              "name": "_check_temporal_consistency",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposal",
                  "type": "normal",
                  "annotation": "ActionProposal",
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Ensure proposal does not violate time semantics.\n\nChecks that the proposal's timestamp is not in the future relative\nto the current system time. This prevents time-travel paradoxes\nin reasoning systems.\n\nParameters\n----------\nproposal : ActionProposal\n    Proposal with timestamp to check\ncontext : dict[str, Any]\n    Must contain 'time' key with datetime value\n\nNotes\n-----\n- Proposal timestamps should reflect when the Variable created it\n- Future timestamps might indicate clock skew or simulation errors\n- If no time in context, temporal checks are skipped (permissive)"
            },
            {
              "name": "_check_conflicts",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "proposal",
                  "type": "normal",
                  "annotation": "ActionProposal",
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Detect conflicts with executing or pending proposals.\n\nPrevents multiple proposals from targeting the same entity\nsimultaneously, which could cause race conditions or undefined\nsystem behavior.\n\nParameters\n----------\nproposal : ActionProposal\n    Proposal to check for conflicts\ncontext : dict[str, Any]\n    Must contain 'active_proposals' key with List[ActionProposal] value\n\nNotes\n-----\n- Only checks proposals in EXECUTING or SELECTED status\n- Self-conflict (same proposal) is ignored\n- Conflict detection is conservative: may reject valid proposals\n  to guarantee safety"
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        }
      ],
      "functions": [],
      "global_vars": []
    },
    "dynexus/core/action/proposal.py": {
      "path": "dynexus/core/action/proposal.py",
      "module_docstring": "Action Proposal System.\n\nAn ActionProposal represents a *hypothesis for intervention*:\n\"If this action is executed under the current state and constraints,\nthen a particular outcome is expected.\"\n\nProposals are created by Variables, filtered by the Runtime,\nselected by the Optimizer, and executed by the Executive.\n\nCCES Pipeline Flow:\n-------------------\n1. VARIABLE (Agent) → Proposes actions based on reasoning\n2. RUNTIME (Filter) → Validates against contracts/constraints\n3. OPTIMIZER (Selector) → Selects optimal proposal(s) based on objectives\n4. EXECUTIVE (Executor) → Applies selected actions to system state\n\nKey Design Principles:\n---------------------\n- Proposals are hypothesis, not commitments\n- Validation occurs before any state mutation\n- Variables propose, but don't execute (separation of concerns)\n- Global consistency through centralized execution",
      "imports": [
        {
          "module": "collections.abc",
          "name": "Callable",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "dataclass",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "field",
          "alias": null,
          "level": 0
        },
        {
          "module": "datetime",
          "name": "datetime",
          "alias": null,
          "level": 0
        },
        {
          "module": "datetime",
          "name": "timezone",
          "alias": null,
          "level": 0
        },
        {
          "module": "enum",
          "name": "Enum",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "utils.id",
          "name": "generate_id",
          "alias": null,
          "level": 3
        }
      ],
      "classes": [
        {
          "name": "ProposalStatus",
          "bases": [
            "Enum"
          ],
          "docstring": "Lifecycle states of an ActionProposal within the CCES pipeline.\n\nStates follow the progression through CCES components:\n\n- PROPOSED: Created by a Variable, awaiting validation\n- VALIDATED: Passed runtime constraint checking\n- SELECTED: Chosen by optimizer for execution\n- EXECUTING: Currently being applied by executive\n- COMPLETED: Successfully executed with results\n- REJECTED: Failed validation or filtering\n- CANCELED: Aborted before execution (e.g., preempted)\n\nTransitions are managed by different CCES components to ensure\nclear separation of responsibilities.",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "ActionProposal",
          "bases": [],
          "docstring": "An action proposal emitted by a reasoning Variable.\n\nRepresents a Variable's hypothesis about a beneficial intervention.\nProposals carry the \"why\" (justification, confidence) alongside the\n\"what\" (action details) to support informed selection by the Optimizer.\n\nSemantics:\n----------\n- Proposals do NOT mutate system state\n- Proposals may be rejected without side effects\n- Proposals carry justification and confidence metrics\n- Execution is external and centralized through Executive\n\nThis separation ensures global consistency and enables\nsimulation, rollback, and counterfactual reasoning.\n\nAttributes\n----------\nid : str\n    Unique identifier for this proposal instance\nvariable_id : str\n    Identifier of the proposing Variable agent\nvariable_name : str\n    Human-readable name of the proposing Variable\ntimestamp : datetime\n    Creation time in UTC (used for ordering/recency considerations)\naction_type : str\n    Type of action being proposed (e.g., 'set_value', 'intervene', 'adjust')\ntarget : Any\n    The target entity for the action (e.g., Variable, Mechanism, resource)\nparameters : dict[str, Any]\n    Action-specific parameters and configuration\nreasoning_result_id : Optional[str]\n    Reference to ReasoningResult that generated this proposal\njustification : str\n    Natural language explanation of why this proposal was generated\nconfidence : float\n    Variable's confidence in proposal efficacy [0.0, 1.0]\nstatus : ProposalStatus\n    Current lifecycle state in CCES pipeline\ndependencies : list[str]\n    IDs of other proposals that must complete before this one\nconstraints : list[Callable[[dict[str, Any]], bool]]\n    Validation functions that check proposal feasibility\nvalidation_errors : list[str]\n    Accumulated validation failure messages\nexecution_result : Optional[Any]\n    Result returned after successful execution\nexecution_time_ms : Optional[float]\n    Execution duration in milliseconds (filled post-execution)\n\nExamples\n--------\n>>> # Variable proposes a temperature adjustment\n>>> from dynexus import ActionProposal\n>>> proposal = ActionProposal(\n...     variable_id=\"temp_sensor_1\",\n...     variable_name=\"ThermalRegulator\",\n...     action_type=\"adjust\",\n...     target=\"heating_element\",\n...     parameters={\"delta\": -5.0, \"gradual\": True},\n...     justification=\"Current temp 25°C exceeds target 22°C\",\n...     confidence=0.8,\n...     constraints=[check_power_budget, check_safety_limits]\n... )\n>>> proposal.status\n<ProposalStatus.PROPOSED: 'proposed'>",
          "methods": [
            {
              "name": "validate",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "context",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate proposal against all declared constraints.\n\nCalled by Runtime during filtering phase to check feasibility.\nEach constraint function receives the current system context\nand returns True if the proposal satisfies that constraint.\n\nParameters\n----------\ncontext : dict[str, Any]\n    Current system state and execution context. Typically includes:\n    - Current Variable values and states\n    - Resource availability\n    - Contract conditions\n    - Environmental factors\n\nReturns\n-------\nbool\n    True if proposal satisfies all constraints, False otherwise.\n\nNotes\n-----\n- Failed constraints are logged in `validation_errors`\n- Validation does NOT guarantee selection (Optimizer decides)\n- Validation is idempotent (can be called multiple times)\n\nExamples\n--------\n>>> context = {\n...     \"power_available\": 1500,\n...     \"safety_mode\": \"normal\",\n...     \"time\": \"14:30\"\n... }\n>>> proposal.validate(context)\nTrue\n\n>>> proposal.validation_errors\n[]  # Empty list indicates successful validation"
            },
            {
              "name": "to_dict",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Serialize proposal for logging, inspection, or UI layers.\n\nCreates a JSON-serializable representation suitable for:\n- Debugging and monitoring dashboards\n- Audit logs and reproducibility records\n- Communication between CCES components\n- Export for external analysis tools\n\nReturns\n-------\ndict[str, Any]\n    Dictionary representation with key fields.\n    Note: Constraint functions are not serialized.\n\nExamples\n--------\n>>> proposal.to_dict()\n{\n    'id': 'proposal_abc123',\n    'variable': 'ThermalRegulator',\n    'action_type': 'adjust',\n    'target': 'heating_element',\n    'parameters': {'delta': -5.0, 'gradual': True},\n    'confidence': 0.8,\n    'justification': 'Current temp 25°C exceeds target 22°C',\n    'status': 'proposed',\n    'timestamp': '2025-12-27T14:34:46.495046+00:00'\n}"
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        }
      ],
      "functions": [],
      "global_vars": []
    },
    "dynexus/math/statistics/core.py": {
      "path": "dynexus/math/statistics/core.py",
      "module_docstring": "Core data structures and base classes for statistical analysis.\n\nThis module provides the fundamental building blocks for all statistical\nanalyses in the package, including result containers, enums, and base\nclasses with comprehensive validation and serialization capabilities.\n\nClasses\n-------\nTrendDirection\n    Enumeration of possible trend directions in time series data.\nSeasonalityType\n    Enumeration of seasonality patterns in time series data.\nAnomalySeverity\n    Enumeration of anomaly severity levels based on statistical significance.\nStatisticalResult\n    Base class for all statistical analysis results.\nTrendAnalysisResult\n    Result of trend analysis on time series data.\nSeasonalityResult\n    Result of seasonality analysis on time series data.\nChangePointResult\n    Result of change point detection in time series data.\nUncertaintyResult\n    Result of uncertainty quantification analysis.\n\nNotes\n-----\n- All result classes include comprehensive validation in __post_init__\n- Metadata is designed for research reproducibility\n- Serialization methods support JSON export for documentation\n- Confidence estimates range from 0.0 to 1.0 with boundary warnings",
      "imports": [
        {
          "module": "__future__",
          "name": "annotations",
          "alias": null,
          "level": 0
        },
        {
          "module": "warnings",
          "alias": null
        },
        {
          "module": "dataclasses",
          "name": "dataclass",
          "alias": null,
          "level": 0
        },
        {
          "module": "dataclasses",
          "name": "field",
          "alias": null,
          "level": 0
        },
        {
          "module": "datetime",
          "name": "datetime",
          "alias": null,
          "level": 0
        },
        {
          "module": "enum",
          "name": "Enum",
          "alias": null,
          "level": 0
        },
        {
          "module": "enum",
          "name": "auto",
          "alias": null,
          "level": 0
        },
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "numpy",
          "alias": "np"
        }
      ],
      "classes": [
        {
          "name": "TrendDirection",
          "bases": [
            "Enum"
          ],
          "docstring": "Enumeration of possible trend directions in time series data.\n\nAttributes\n----------\nINCREASING : TrendDirection\n    The time series shows a statistically significant positive trend.\n    Values tend to increase over time.\nDECREASING : TrendDirection\n    The time series shows a statistically significant negative trend.\n    Values tend to decrease over time.\nSTABLE : TrendDirection\n    No statistically significant trend detected. The time series is\n    stationary or exhibits no clear directional movement.\nINSUFFICIENT_DATA : TrendDirection\n    Cannot determine trend due to insufficient data points or\n    non-numeric values.\nCYCLICAL : TrendDirection\n    The time series exhibits cyclical patterns without a clear\n    long-term direction.\n\nExamples\n--------\n>>> from dynexus import TrendDirection\n>>> trend = TrendDirection.INCREASING\n>>> print(f\"Detected trend: {trend.name}\")\nDetected trend: INCREASING\n>>> print(f\"Description: {trend.name.lower()} trend\")\nDescription: increasing trend",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "SeasonalityType",
          "bases": [
            "Enum"
          ],
          "docstring": "Enumeration of seasonality patterns in time series data.\n\nAttributes\n----------\nNONE : SeasonalityType\n    No significant seasonal pattern detected.\nDAILY : SeasonalityType\n    Daily seasonality (24-hour cycles).\nWEEKLY : SeasonalityType\n    Weekly seasonality (7-day cycles).\nMONTHLY : SeasonalityType\n    Monthly seasonality (~30-day cycles).\nQUARTERLY : SeasonalityType\n    Quarterly seasonality (~90-day cycles).\nYEARLY : SeasonalityType\n    Yearly seasonality (365-day cycles).\nMULTIPLE : SeasonalityType\n    Multiple seasonal patterns detected.\n\nExamples\n--------\n>>> from dynexus import SeasonalityType\n>>> seasonality = SeasonalityType.DAILY\n>>> if seasonality != SeasonalityType.NONE:\n...     print(f\"Detected {seasonality.name.lower()} seasonality\")\nDetected daily seasonality",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "AnomalySeverity",
          "bases": [
            "Enum"
          ],
          "docstring": "Enumeration of anomaly severity levels based on statistical significance.\n\nSeverity is determined by how many standard deviations an observation\nis from the expected value:\n\nAttributes\n----------\nNORMAL : AnomalySeverity\n    Within 2 standard deviations (≈95% confidence interval).\n    Not considered anomalous.\nMILD : AnomalySeverity\n    2-3 standard deviations from expected (≈95-99.7% interval).\n    Mild anomaly worthy of monitoring.\nMODERATE : AnomalySeverity\n    3-4 standard deviations from expected (≈99.7-99.994% interval).\n    Moderate anomaly requiring investigation.\nSEVERE : AnomalySeverity\n    4-5 standard deviations from expected (≈99.994-99.9999% interval).\n    Severe anomaly requiring immediate attention.\nEXTREME : AnomalySeverity\n    More than 5 standard deviations from expected.\n    Extreme anomaly indicating possible measurement error or\n    critical system failure.\n\nExamples\n--------\n>>> from dynexus import AnomalySeverity\n>>> severity = AnomalySeverity.SEVERE\n>>> if severity.value >= AnomalySeverity.MODERATE.value:\n...     print(f\"High severity anomaly detected: {severity.name}\")\nHigh severity anomaly detected: SEVERE",
          "methods": [],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "StatisticalResult",
          "bases": [],
          "docstring": "Base class for all statistical analysis results.\n\nProvides a standardized structure for returning statistical analysis\nresults with success status, confidence estimates, and comprehensive\nmetadata for research reproducibility.\n\nAttributes\n----------\nsuccess : bool\n    Whether the statistical analysis completed successfully.\n    If False, the result may contain partial or default values.\nconfidence : float\n    Confidence estimate in the analysis result, ranging from 0.0 to 1.0.\n    Represents the statistical confidence or reliability of the result.\nmetadata : Dict[str, Any]\n    Additional metadata about the analysis process, including:\n    - Method used and parameters\n    - Sample statistics\n    - Computational details\n    - Warnings or limitations\nerror_message : Optional[str]\n    Error message if the analysis failed. None if successful.\nexecution_time_ms : Optional[float]\n    Execution time in milliseconds for performance monitoring.\n\nExamples\n--------\n>>> from dynexus import StatisticalResult\n>>> result = StatisticalResult(\n...     success=True,\n...     confidence=0.95,\n...     metadata={\"method\": \"linear_regression\", \"n_samples\": 100},\n...     execution_time_ms=15.2\n... )\n>>> print(f\"Analysis succeeded with {result.confidence:.1%} confidence\")\nAnalysis succeeded with 95.0% confidence\n\nNotes\n-----\n- Confidence values are automatically validated in __post_init__\n- Metadata should include sufficient detail for reproducibility\n- Execution time helps with performance optimization\n- Error messages provide context for debugging failed analyses",
          "methods": [
            {
              "name": "__post_init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate the statistical result after initialization.\n\nPerforms validation checks and issues warnings for potential issues.\n\nRaises\n------\nValueError\n    If confidence is outside valid range [0, 1].\n\nWarns\n-----\nUserWarning\n    If confidence is close to boundary values (0 or 1) which may\n    indicate overconfidence or numerical issues."
            },
            {
              "name": "to_dict",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [],
              "is_async": false,
              "docstring": "Convert result to dictionary for serialization.\n\nReturns\n-------\nDict[str, Any]\n    Dictionary representation of the result suitable for JSON\n    serialization or storage.\n\nExamples\n--------\n>>> from dynexus import StatisticalResult\n>>> result = StatisticalResult(success=True, confidence=0.9)\n>>> result_dict = result.to_dict()\n>>> print(result_dict.keys())\ndict_keys(['success', 'confidence', 'metadata', 'error_message',\n            'execution_time_ms', 'timestamp'])"
            },
            {
              "name": "__str__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Return human-readable string representation."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "TrendAnalysisResult",
          "bases": [
            "StatisticalResult"
          ],
          "docstring": "Result of trend analysis on time series data.\n\nContains comprehensive information about detected trends including\ndirection, strength, statistical significance, and model parameters.\n\nAttributes\n----------\ndirection : TrendDirection\n    Direction of the detected trend (increasing, decreasing, stable, etc.).\nslope : float\n    Slope of the trend line (change per time unit).\n    Positive for increasing trends, negative for decreasing trends.\nintercept : float\n    Intercept of the trend line at time zero.\nr_squared : float\n    Coefficient of determination (R²) indicating goodness of fit.\n    Range: 0.0 (no fit) to 1.0 (perfect fit).\np_value : float\n    P-value for the trend significance test.\n    Typically compared against alpha=0.05 threshold.\nstrength : float\n    Normalized measure of trend strength from 0.0 (no trend) to 1.0\n    (strong trend). Combines slope magnitude and statistical significance.\nis_significant : bool\n    Whether the trend is statistically significant at the chosen\n    significance level (default alpha=0.05).\nsample_size : int\n    Number of data points used in the analysis.\nslope_se : Optional[float] = None\n    Standard error of the slope estimate. None if not calculated.\nconfidence_interval : Optional[Tuple[float, float]] = None\n    95% confidence interval for the slope. None if not calculated.\nresiduals : Optional[np.ndarray] = None\n    Residuals from the trend model. Useful for diagnostic checking.\n\nExamples\n--------\n>>> from dynexus import TrendAnalysisResult, TrendDirection\n>>> result = TrendAnalysisResult(\n...     success=True,\n...     confidence=0.92,\n...     direction=TrendDirection.INCREASING,\n...     slope=0.5,\n...     intercept=10.0,\n...     r_squared=0.85,\n...     p_value=0.001,\n...     strength=0.8,\n...     is_significant=True,\n...     sample_size=100\n... )\n>>> if result.is_significant:\n...     print(f\"Significant {result.direction.name.lower()} trend detected\")\n...     print(f\"Rate of change: {result.slope:.3f} per time unit\")\nSignificant increasing trend detected\nRate of change: 0.500 per time unit\n\nNotes\n-----\n- R² values close to 1.0 indicate the trend model explains most variance\n- P-values < 0.05 are typically considered statistically significant\n- Strength is calculated as: strength = |slope| * (1 - p_value) / scale_factor\n- Always check residuals for patterns indicating model misspecification",
          "methods": [
            {
              "name": "__post_init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate trend analysis result.\n\nPerforms additional validation specific to trend analysis results.\n\nRaises\n------\nValueError\n    If sample size is negative, R² is outside [0, 1], or p-value is\n    outside [0, 1].\n\nWarns\n-----\nRuntimeWarning\n    If R² is suspiciously high (>0.999) which may indicate overfitting."
            },
            {
              "name": "get_trend_description",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Get a human-readable description of the trend.\n\nReturns\n-------\nstr\n    Descriptive text summarizing the trend analysis result.\n\nExamples\n--------\n>>> from dynexus import TrendAnalysisResult, TrendDirection\n>>> result = TrendAnalysisResult(\n...     success=True, confidence=0.9,\n...     direction=TrendDirection.INCREASING,\n...     slope=0.5, intercept=0.5, r_squared=0.85, p_value=0.01,\n...     strength=0.7, is_significant=True, sample_size=50\n... )\n>>> print(result.get_trend_description())\nstatistically significant increasing trend (slope = 0.500, R² = 0.850, p = 0.010)"
            },
            {
              "name": "__str__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Return human-readable string representation."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "SeasonalityResult",
          "bases": [
            "StatisticalResult"
          ],
          "docstring": "Result of seasonality analysis on time series data.\n\nIdentifies and characterizes seasonal patterns in time series data,\nincluding period, strength, and decomposition components.\n\nAttributes\n----------\nseasonality_type : SeasonalityType\n    Type of seasonality detected (daily, weekly, monthly, etc.).\nperiod : Optional[int]\n    Primary seasonal period in time units. None if no seasonality detected.\nstrength : float\n    Strength of the seasonal pattern from 0.0 (no seasonality) to 1.0\n    (strong seasonality). Calculated as proportion of variance explained\n    by seasonal component.\nseasonal_component : Optional[np.ndarray]\n    Estimated seasonal component of the time series. Same length as\n    input data. None if seasonality not detected or decomposition failed.\ntrend_component : Optional[np.ndarray]\n    Estimated trend component from seasonal decomposition.\nresidual_component : Optional[np.ndarray]\n    Residuals after removing trend and seasonal components.\nseasonal_peaks : Optional[List[int]] = None\n    Indices of seasonal peaks within one period.\nseasonal_troughs : Optional[List[int]] = None\n    Indices of seasonal troughs within one period.\namplitude : Optional[float] = None\n    Amplitude of the seasonal pattern (peak-to-trough difference).\nphase : Optional[float] = None\n    Phase shift of the seasonal pattern in radians.\n\nExamples\n--------\n>>> from dynexus import SeasonalityResult, SeasonalityType\n>>> result = SeasonalityResult(\n...     success=True,\n...     confidence=0.88,\n...     seasonality_type=SeasonalityType.DAILY,\n...     period=24,\n...     strength=0.65,\n...     amplitude=15.2,\n...     metadata={\"method\": \"STL\", \"decomposition\": \"additive\"}\n... )\n>>> if result.strength > 0.3:\n...     print(f\"Strong {result.seasonality_type.name.lower()} seasonality detected\")\n...     print(f\"Period: {result.period} time units, Strength: {result.strength:.2f}\")\nStrong daily seasonality detected\nPeriod: 24 time units, Strength: 0.65\n\nNotes\n-----\n- Seasonality strength > 0.3 is typically considered meaningful\n- Multiple seasonal periods can be handled through the MULTIPLE type\n- Decomposition uses additive model by default: data = trend + seasonal + residual\n- For multiplicative patterns, consider log transformation first",
          "methods": [
            {
              "name": "__post_init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate seasonality analysis result.\n\nRaises\n------\nValueError\n    If period is not positive when seasonality is detected, or\n    strength is outside [0, 1]."
            },
            {
              "name": "get_seasonality_description",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Get a human-readable description of the seasonality.\n\nReturns\n-------\nstr\n    Descriptive text summarizing the seasonality analysis result.\n\nExamples\n--------\n>>> from dynexus import SeasonalityResult, SeasonalityType\n>>> result = SeasonalityResult(\n...     success=True,\n...     confidence=0.78,\n...     seasonality_type=SeasonalityType.WEEKLY,\n...     period=7,\n...     strength=0.45\n... )\n>>> print(result.get_seasonality_description())\nweekly seasonality detected (period=7, strength=0.45)"
            },
            {
              "name": "__str__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Return human-readable string representation."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "ChangePointResult",
          "bases": [
            "StatisticalResult"
          ],
          "docstring": "Result of change point detection in time series data.\n\nIdentifies points where the statistical properties of a time series\nchange significantly, indicating regime changes, interventions, or\nstructural breaks.\n\nAttributes\n----------\nchange_points : List[int]\n    Indices where change points are detected (0-based indexing).\nconfidence_scores : List[float]\n    Confidence scores for each detected change point [0, 1].\nchange_types : List[str]\n    Type of change at each point (e.g., 'mean', 'variance', 'trend', 'both').\nmagnitudes : List[float]\n    Magnitude of change at each point (normalized).\nsegments : List[Tuple[int, int]]\n    Start and end indices of homogeneous segments between change points.\nsegment_stats : List[Dict[str, Any]]\n    Statistical properties of each segment (mean, variance, etc.).\n\nExamples\n--------\n>>> from dynexus import ChangePointResult\n>>> result = ChangePointResult(\n...     success=True,\n...     confidence=0.87,\n...     change_points=[49, 99],\n...     confidence_scores=[0.92, 0.78],\n...     change_types=['mean', 'variance'],\n...     magnitudes=[2.5, 1.8],\n...     segments=[],\n...     segment_stats=[]\n... )\n>>> for i, cp in enumerate(result.change_points):\n...     print(f\"Change at index {cp}: {result.change_types[i]} \"\n...           f\"(confidence: {result.confidence_scores[i]:.2f})\")\nChange at index 49: mean (confidence: 0.92)\nChange at index 99: variance (confidence: 0.78)\n\nNotes\n-----\n- Change points divide the time series into statistically homogeneous segments\n- Multiple algorithms available: binary segmentation, PELT, window-based\n- Confidence scores help prioritize important changes\n- Consider false discovery rate control for multiple testing",
          "methods": [
            {
              "name": "__post_init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate change point detection result.\n\nRaises\n------\nValueError\n    If lengths of change_points, confidence_scores, change_types,\n    or magnitudes don't match."
            },
            {
              "name": "get_change_description",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Get a human-readable description of detected change points.\n\nReturns\n-------\nstr\n    Descriptive text summarizing the change point analysis.\n\nExamples\n--------\n>>> from dynexus import ChangePointResult\n>>> result = ChangePointResult(\n...     success=True,\n...     confidence=0.85,\n...     change_points=[50, 150],\n...     confidence_scores=[0.9, 0.8],\n...     change_types=['mean', 'variance'],\n...     magnitudes=[2.0, 1.5],\n...     segments=[(0, 49), (50, 149), (150, 199)],\n...     segment_stats=[{}, {}, {}]\n... )\n>>> print(result.get_change_description())\nDetected 2 change points at indices [50, 150] with mean confidence 0.85"
            },
            {
              "name": "__str__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Return human-readable string representation."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        },
        {
          "name": "UncertaintyResult",
          "bases": [
            "StatisticalResult"
          ],
          "docstring": "Result of uncertainty quantification analysis.\n\nQuantifies uncertainty in measurements, estimates, or predictions using\nstatistical methods like bootstrapping, Monte Carlo simulation, or\nanalytical error propagation.\n\nAttributes\n----------\nestimate : float\n    Point estimate (mean, median, or other central tendency).\nlower_bound : float\n    Lower bound of the confidence/credible interval.\nupper_bound : float\n    Upper bound of the confidence/credible interval.\ninterval_width : float\n    Width of the confidence interval (upper_bound - lower_bound).\ninterval_type : str\n    Type of interval ('confidence', 'credible', 'prediction').\nconfidence_level : float\n    Confidence level of the interval (e.g., 0.95 for 95% CI).\nstandard_error : Optional[float]\n    Standard error of the estimate. None if not applicable.\ndistribution_type : Optional[str]\n    Assumed or estimated distribution type.\ndistribution_params : Optional[Dict[str, float]]\n    Parameters of the distribution.\nsamples : Optional[np.ndarray]\n    Bootstrap or Monte Carlo samples used for estimation.\n\nExamples\n--------\n>>> from dynexus import UncertaintyResult\n>>> result = UncertaintyResult(\n...     success=True,\n...     confidence=0.95,\n...     estimate=10.5,\n...     lower_bound=9.8,\n...     upper_bound=11.2,\n...     interval_width=1.4,\n...     interval_type=\"confidence\",\n...     confidence_level=0.95,\n...     standard_error=0.35,\n...     metadata={\"method\": \"bootstrap\", \"n_samples\": 10000}\n... )\n>>> print(f\"Estimate: {result.estimate:.2f}\")\nEstimate: 10.50\n>>> print(f\"95% CI: ({result.lower_bound:.2f}, {result.upper_bound:.2f})\")\n95% CI: (9.80, 11.20)\n\nNotes\n-----\n- Bootstrap intervals are non-parametric and make fewer assumptions\n- Prediction intervals are wider than confidence intervals as they include\n  observation error\n- For small samples (<30), consider t-distribution based intervals\n- Bayesian credible intervals have different interpretation than\n  frequentist confidence intervals",
          "methods": [
            {
              "name": "__post_init__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "None",
              "decorators": [],
              "is_async": false,
              "docstring": "Validate uncertainty quantification result.\n\nRaises\n------\nValueError\n    If lower_bound > upper_bound, confidence_level not in (0, 1),\n    or interval_width doesn't match bounds."
            },
            {
              "name": "contains_value",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                },
                {
                  "name": "value",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [],
              "is_async": false,
              "docstring": "Check if a value is within the confidence interval.\n\nParameters\n----------\nvalue : float\n    Value to check against the confidence interval.\n\nReturns\n-------\nbool\n    True if the value is within the interval (inclusive),\n    False otherwise.\n\nExamples\n--------\n>>> from dynexus import UncertaintyResult\n>>> result = UncertaintyResult(\n...     success=True, confidence=0.9,\n...     estimate=10.0, lower_bound=9.0, upper_bound=11.0,\n...     interval_width=2.0, interval_type=\"confidence\",\n...     confidence_level=0.95\n... )\n>>> print(result.contains_value(10.5))\nTrue\n>>> print(result.contains_value(12.0))\nFalse"
            },
            {
              "name": "get_interval_description",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Get a human-readable description of the uncertainty result.\n\nReturns\n-------\nstr\n    Descriptive text summarizing the uncertainty analysis.\n\nExamples\n--------\n>>> from dynexus import UncertaintyResult\n>>> result = UncertaintyResult(\n...     success=True,\n...     confidence=0.97,\n...     estimate=10.5,\n...     lower_bound=9.8,\n...     upper_bound=11.2,\n...     interval_width=1.4,\n...     interval_type=\"confidence\",\n...     confidence_level=0.95\n... )\n>>> print(result.get_interval_description())\n95% confidence interval: 10.50 (9.80, 11.20) [width: 1.40]"
            },
            {
              "name": "__str__",
              "args": [
                {
                  "name": "self",
                  "type": "normal",
                  "annotation": null,
                  "default": null
                }
              ],
              "returns": "str",
              "decorators": [],
              "is_async": false,
              "docstring": "Return human-readable string representation."
            }
          ],
          "class_methods": [],
          "static_methods": [],
          "properties": []
        }
      ],
      "functions": [],
      "global_vars": []
    },
    "dynexus/math/statistics/utils.py": {
      "path": "dynexus/math/statistics/utils.py",
      "module_docstring": "Core statistical utilities with robust error handling.\n\nThis module provides fundamental statistical functions with comprehensive\nvalidation, NaN handling, and edge case protection. All methods are designed\nto be numerically stable and return sensible defaults when data is insufficient\nor invalid.\n\nClasses\n-------\nStatisticalUtils\n    Core statistical utilities with robust error handling.\n\nMethods\n-------\nsafe_mean(arr, default=0.0, skipna=True)\n    Calculate mean with NaN/Inf handling and validation.\nsafe_std(arr, default=0.0, ddof=1, skipna=True)\n    Calculate standard deviation with robust error handling.\nsafe_percentile(arr, q, default=0.0)\n    Calculate percentiles with edge case protection.\nis_numeric_array(arr)\n    Check if array contains convertible numeric values.\nremove_outliers_iqr(arr, multiplier=1.5)\n    Remove outliers using IQR method.\nwinsorize(arr, limits=(0.05, 0.05))\n    Winsorize array to limit extreme values.\ncalculate_skewness(arr)\n    Calculate sample skewness (Fisher-Pearson coefficient).\ncalculate_kurtosis(arr, fisher=True)\n    Calculate sample kurtosis.\n\nNotes\n-----\n- All methods handle NaN values appropriately (skip or propagate)\n- Default values are returned when calculation is not possible\n- Input validation prevents silent errors\n- Methods are stateless and thread-safe",
      "imports": [
        {
          "module": "typing",
          "name": "Any",
          "alias": null,
          "level": 0
        },
        {
          "module": "numpy",
          "alias": "np"
        },
        {
          "module": "scipy",
          "name": "stats",
          "alias": null,
          "level": 0
        },
        {
          "module": "utils.logging",
          "name": "get_logger",
          "alias": null,
          "level": 3
        },
        {
          "module": "itertools",
          "name": "combinations",
          "alias": null,
          "level": 0
        }
      ],
      "classes": [
        {
          "name": "StatisticalUtils",
          "bases": [],
          "docstring": "Core statistical utilities with robust error handling.\n\nProvides fundamental statistical functions with comprehensive\nvalidation, NaN handling, and edge case protection. All methods\nare designed to be numerically stable and return sensible defaults\nwhen data is insufficient or invalid.\n\nExamples\n--------\n>>> import numpy as np\n>>> from dynexus import StatisticalUtils\n>>>\n>>> # Handle arrays with missing values\n>>> data = np.array([1.0, 2.0, np.nan, 4.0, 5.0])\n>>> mean = StatisticalUtils.safe_mean(data)\n>>> print(f\"Mean (ignoring NaN): {mean:.2f}\")\nMean (ignoring NaN): 3.00\n>>>\n>>> # Handle empty arrays gracefully\n>>> empty_data = np.array([])\n>>> std = StatisticalUtils.safe_std(empty_data, default=1.0)\n>>> print(f\"Std of empty array: {std:.2f}\")\nStd of empty array: 1.00",
          "methods": [],
          "class_methods": [],
          "static_methods": [
            {
              "name": "safe_mean",
              "args": [
                {
                  "name": "arr",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "default",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "skipna",
                  "type": "normal",
                  "annotation": "bool",
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Calculate mean of array with robust error handling.\n\nParameters\n----------\narr : np.ndarray\n    Input array. Can contain NaN or Inf values.\ndefault : float, optional\n    Default value to return if mean cannot be calculated.\nskipna : bool, optional\n    If True, skip NaN and Inf values. If False, NaN propagates.\n\nReturns\n-------\nfloat\n    Mean of valid values in array, or default if calculation fails.\n\nRaises\n------\nTypeError\n    If input is not a numpy array or convertible to one.\n\nExamples\n--------\n>>> import numpy as np\n>>> from dynexus import StatisticalUtils\n>>> StatisticalUtils.safe_mean([1, 2, 3, 4, 5])\n3.0\n>>> StatisticalUtils.safe_mean([1, 2, np.nan, 4, 5])\n3.0\n>>> StatisticalUtils.safe_mean([])\n0.0\n>>> StatisticalUtils.safe_mean([], default=np.nan)\nnan"
            },
            {
              "name": "safe_std",
              "args": [
                {
                  "name": "arr",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "default",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "ddof",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                },
                {
                  "name": "skipna",
                  "type": "normal",
                  "annotation": "bool",
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Calculate standard deviation with robust error handling.\n\nParameters\n----------\narr : np.ndarray\n    Input array. Can contain NaN or Inf values.\ndefault : float, optional\n    Default value to return if std cannot be calculated.\nddof : int, optional\n    Delta degrees of freedom. The divisor used is N - ddof.\nskipna : bool, optional\n    If True, skip NaN and Inf values. If False, NaN propagates.\n\nReturns\n-------\nfloat\n    Standard deviation of valid values, or default if calculation fails.\n\nNotes\n-----\n- Returns default when the number of valid observations is <= ddof\n- Uses Bessel's correction by default (ddof=1)\n- For population std, use ddof=0\n\nExamples\n--------\n>>> from dynexus import StatisticalUtils\n>>> StatisticalUtils.safe_std([1, 2, 3, 4, 5])\n1.58...\n>>> StatisticalUtils.safe_std([1, 1, 1, 1, 1])\n0.0\n>>> StatisticalUtils.safe_std([1])\n0.0\n>>> StatisticalUtils.safe_std([])\n0.0"
            },
            {
              "name": "safe_percentile",
              "args": [
                {
                  "name": "arr",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "q",
                  "type": "normal",
                  "annotation": "float | list[float]",
                  "default": null
                },
                {
                  "name": "default",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "float | np.ndarray",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Calculate percentiles with edge case protection.\n\nParameters\n----------\narr : np.ndarray\n    Input array. Can contain NaN or Inf values.\nq : float or list of floats\n    Percentile(s) to compute, in range [0, 100].\ndefault : float, optional\n    Default value to return if percentile cannot be calculated.\n\nReturns\n-------\nfloat or np.ndarray\n    Percentile value(s), or default if calculation fails.\n\nRaises\n------\nValueError\n    If q is outside [0, 100].\n\nExamples\n--------\n>>> from dynexus import StatisticalUtils\n>>> StatisticalUtils.safe_percentile([1, 2, 3, 4, 5], 50)\n3.0\n>>> StatisticalUtils.safe_percentile([1, 2, 3, 4, 5], [25, 50, 75])\narray([2., 3., 4.])\n>>> StatisticalUtils.safe_percentile([], 50)\n0.0"
            },
            {
              "name": "is_numeric_array",
              "args": [
                {
                  "name": "arr",
                  "type": "normal",
                  "annotation": "Any",
                  "default": null
                }
              ],
              "returns": "bool",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Check if array contains convertible numeric values.\n\nParameters\n----------\narr : Any\n    Input to check for numeric convertibility.\n\nReturns\n-------\nbool\n    True if array can be converted to a numeric dtype\n    (e.g. float) without conversion errors, False otherwise.\n\nExamples\n--------\n>>> from dynexus import StatisticalUtils\n>>> StatisticalUtils.is_numeric_array([1, 2, 3])\nTrue\n>>> StatisticalUtils.is_numeric_array([1.0, 2.0, 3.0])\nTrue\n>>> StatisticalUtils.is_numeric_array(['a', 'b', 'c'])\nFalse\n>>> StatisticalUtils.is_numeric_array([1, 'a', 3])\nFalse"
            },
            {
              "name": "remove_outliers_iqr",
              "args": [
                {
                  "name": "arr",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "multiplier",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "np.ndarray",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Remove outliers using Interquartile Range (IQR) method.\n\nParameters\n----------\narr : np.ndarray\n    Input array. Can contain NaN or Inf values.\nmultiplier : float, optional\n    Multiplier for IQR to determine outlier bounds.\n    Typical values: 1.5 (default) for moderate outliers,\n    3.0 for extreme outliers.\n\nReturns\n-------\nnp.ndarray\n    Array with outliers removed.\n\nNotes\n-----\n- Outliers are values below Q1 - multiplier*IQR or above Q3 + multiplier*IQR\n- Returns empty array if all non-NaN values are outliers\n- Preserves NaN values (they are not considered outliers)\n- If IQR is zero, no values are removed\n\nExamples\n--------\n>>> import numpy as np\n>>> from dynexus import StatisticalUtils\n>>> data = np.array([1, 2, 3, 4, 5, 100])\n>>> StatisticalUtils.remove_outliers_iqr(data)\narray([1, 2, 3, 4, 5])\n>>> StatisticalUtils.remove_outliers_iqr(data, multiplier=38.2)\narray([  1,   2,   3,   4,   5, 100])"
            },
            {
              "name": "winsorize",
              "args": [
                {
                  "name": "arr",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "limits",
                  "type": "normal",
                  "annotation": "tuple[float, float]",
                  "default": null
                }
              ],
              "returns": "np.ndarray",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Winsorize array by limiting extreme values.\n\nParameters\n----------\narr : np.ndarray\n    Input array. Can contain NaN or Inf values.\nlimits : tuple of float, optional\n    Tuple of (lower_limit, upper_limit) as proportions to trim\n    from each end. Each value in [0, 0.5].\n\nReturns\n-------\nnp.ndarray\n    Winsorized array with extreme values replaced by percentiles.\n\nRaises\n------\nValueError\n    If limits are outside [0, 0.5] or lower_limit + upper_limit >= 1.\n\nExamples\n--------\n>>> import numpy as np\n>>> from dynexus import StatisticalUtils\n>>> data = np.array([1, 2, 3, 4, 5, 100])\n>>> StatisticalUtils.winsorize(data, limits=(0.1, 0.1))\narray([ 1.5,  2. ,  3. ,  4. ,  5. , 52.5])"
            },
            {
              "name": "calculate_skewness",
              "args": [
                {
                  "name": "arr",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Calculate sample skewness (Fisher-Pearson coefficient).\n\nParameters\n----------\narr : np.ndarray\n    Input array. Can contain NaN or Inf values.\n\nReturns\n-------\nfloat\n    Sample skewness. Returns 0 for insufficient data.\n\nNotes\n-----\n- Skewness > 0: right-skewed (tail on right)\n- Skewness < 0: left-skewed (tail on left)\n- Skewness ≈ 0: approximately symmetric\n- Requires at least 3 non-NaN values\n\nExamples\n--------\n>>> from dynexus import StatisticalUtils\n>>> StatisticalUtils.calculate_skewness([1, 2, 3, 4, 5])\n0.0\n>>> StatisticalUtils.calculate_skewness([1, 1, 1, 2, 100])\n2.23..."
            },
            {
              "name": "calculate_kurtosis",
              "args": [
                {
                  "name": "arr",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "fisher",
                  "type": "normal",
                  "annotation": "bool",
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Calculate sample kurtosis.\n\nParameters\n----------\narr : np.ndarray\n    Input array. Can contain NaN or Inf values.\nfisher : bool, optional\n    If True (default), Fisher's definition is used (excess kurtosis).\n    If False, Pearson's definition is used.\n\nReturns\n-------\nfloat\n    Sample kurtosis. Returns 0 for insufficient data.\n\nNotes\n-----\n- Excess kurtosis (Fisher=True):\n* > 0: leptokurtic (heavy tails)\n* < 0: platykurtic (light tails)\n* = 0: mesokurtic (normal tails)\n- Requires at least 4 non-NaN values\n\nExamples\n--------\n>>> from dynexus import StatisticalUtils\n>>> StatisticalUtils.calculate_kurtosis([1, 2, 3, 4, 5])\n-1.2\n>>> StatisticalUtils.calculate_kurtosis([1, 1, 1, 1, 100])\n5.0"
            },
            {
              "name": "calculate_moments",
              "args": [
                {
                  "name": "arr",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "max_order",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "dict[int, float]",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Calculate statistical central moments up to specified order.\n\nParameters\n----------\narr : np.ndarray\n    Input array. Can contain NaN or Inf values.\nmax_order : int, optional\n    Maximum moment order to calculate (default 4).\n\nReturns\n-------\nDict[int, float]\n    Dictionary mapping moment order to calculated value.\n    Moment 1 is the mean; higher moments are central moments\n    normalized by N.\n\nExamples\n--------\n>>> import numpy as np\n>>> from dynexus import StatisticalUtils\n>>> data = np.random.normal(0, 1, 1000)\n>>> moments = StatisticalUtils.calculate_moments(data, max_order=4)\n>>> print(f\"Mean: {moments[1]:.3f}\")\nMean: 0.001\n>>> print(f\"Variance: {moments[2]:.3f}\")\nVariance: 1.026"
            },
            {
              "name": "calculate_scale",
              "args": [
                {
                  "name": "arr",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "method",
                  "type": "normal",
                  "annotation": "str",
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Calculate robust scale estimates.\n\nParameters\n----------\narr : np.ndarray\n    Input array. Can contain NaN or Inf values.\nmethod : str, optional\n    Scale estimation method:\n    - 'mad': Median Absolute Deviation\n    - 'iqr': Interquartile Range\n    - 'sn': Qn estimator (robust to outliers)\n\nReturns\n-------\nfloat\n    Robust scale estimate.\n\nExamples\n--------\n>>> import numpy as np\n>>> from dynexus import StatisticalUtils\n>>> data = np.array([1, 2, 3, 4, 5, 100])  # Contains outlier\n>>> StatisticalUtils.calculate_robust_scale(data, 'mad')\n2.22...\n>>> StatisticalUtils.calculate_robust_scale(data, 'iqr')\n1.85..."
            },
            {
              "name": "calculate_linear_prediction",
              "args": [
                {
                  "name": "values",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "timestamps",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "steps_ahead",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Perform linear regression prediction with confidence intervals.\n\nParameters\n----------\nvalues : np.ndarray\n    Historical values.\ntimestamps : np.ndarray\n    Corresponding timestamps (in seconds).\nsteps_ahead : int\n    Number of steps to predict ahead.\n\nReturns\n-------\ndict[str, Any]\n    Prediction results with confidence intervals."
            },
            {
              "name": "calculate_exponential_smoothing",
              "args": [
                {
                  "name": "values",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "steps_ahead",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                },
                {
                  "name": "alpha",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Perform exponential smoothing prediction.\n\nParameters\n----------\nvalues : np.ndarray\n    Historical values.\nsteps_ahead : int\n    Number of steps to predict ahead.\nalpha : float\n    Smoothing factor (0 to 1).\n\nReturns\n-------\ndict[str, Any]\n    Prediction results with confidence bounds."
            },
            {
              "name": "perform_mann_kendall_test",
              "args": [
                {
                  "name": "values",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Perform Mann-Kendall trend test.\n\nParameters\n----------\nvalues : np.ndarray\n    Time series values.\n\nReturns\n-------\ndict[str, Any]\n    Trend test results."
            },
            {
              "name": "calculate_correlation_analysis",
              "args": [
                {
                  "name": "series1",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "series2",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "significance_level",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Calculate correlation analysis between two series.\n\nParameters\n----------\nseries1, series2 : np.ndarray\n    Input time series.\nsignificance_level : float\n    Statistical significance threshold.\n\nReturns\n-------\ndict[str, Any]\n    Correlation analysis results."
            },
            {
              "name": "perform_bootstrap_analysis",
              "args": [
                {
                  "name": "values",
                  "type": "normal",
                  "annotation": "list[float]",
                  "default": null
                },
                {
                  "name": "confidences",
                  "type": "normal",
                  "annotation": "list[float]",
                  "default": null
                },
                {
                  "name": "confidence_level",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                },
                {
                  "name": "n_samples",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Perform bootstrap uncertainty analysis.\n\nParameters\n----------\nvalues : list[float]\n    Sample values.\nconfidences : list[float]\n    Confidence values.\nconfidence_level : float\n    Confidence interval level.\nn_samples : int\n    Number of bootstrap samples.\n\nReturns\n-------\ndict[str, Any]\n    Bootstrap analysis results."
            },
            {
              "name": "basic_statistical_analysis",
              "args": [
                {
                  "name": "values",
                  "type": "normal",
                  "annotation": "list[float]",
                  "default": null
                },
                {
                  "name": "confidences",
                  "type": "normal",
                  "annotation": "list[float]",
                  "default": null
                },
                {
                  "name": "confidence_level",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Perform basic statistical analysis.\n\nParameters\n----------\nvalues : list[float]\n    Sample values.\nconfidences : list[float]\n    Confidence values.\nconfidence_level : float\n    Confidence interval level.\n\nReturns\n-------\ndict[str, Any]\n    Statistical analysis results."
            },
            {
              "name": "analyze_volatility",
              "args": [
                {
                  "name": "values",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                }
              ],
              "returns": "dict[str, Any]",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Analyze volatility in time series.\n\nParameters\n----------\nvalues : np.ndarray\n    Time series values.\n\nReturns\n-------\ndict[str, Any]\n    Volatility analysis results."
            },
            {
              "name": "detect_change_points",
              "args": [
                {
                  "name": "values",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "timestamps",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                }
              ],
              "returns": "list[dict[str, Any]]",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Detect change points in time series.\n\nParameters\n----------\nvalues : np.ndarray\n    Time series values.\ntimestamps : np.ndarray\n    Corresponding timestamps.\n\nReturns\n-------\nlist[dict[str, Any]]\n    List of detected change points."
            },
            {
              "name": "calculate_trend_confidence",
              "args": [
                {
                  "name": "trend_result",
                  "type": "normal",
                  "annotation": "dict[str, Any]",
                  "default": null
                },
                {
                  "name": "sample_size",
                  "type": "normal",
                  "annotation": "int",
                  "default": null
                }
              ],
              "returns": "float",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Calculate confidence in trend analysis.\n\nParameters\n----------\ntrend_result : dict[str, Any]\n    Trend analysis results.\nsample_size : int\n    Number of data points.\n\nReturns\n-------\nfloat\n    Confidence score (0.0 to 1.0)."
            },
            {
              "name": "align_time_series",
              "args": [
                {
                  "name": "series1",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "timestamps1",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "series2",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "timestamps2",
                  "type": "normal",
                  "annotation": "np.ndarray",
                  "default": null
                },
                {
                  "name": "tolerance",
                  "type": "normal",
                  "annotation": "float",
                  "default": null
                }
              ],
              "returns": "tuple[np.ndarray, np.ndarray]",
              "decorators": [
                "staticmethod"
              ],
              "is_async": false,
              "docstring": "Align two time series to common timestamps.\n\nParameters\n----------\nseries1, series2 : np.ndarray\n    Time series values.\ntimestamps1, timestamps2 : np.ndarray\n    Corresponding timestamps.\ntolerance : float\n    Maximum time difference for alignment (seconds).\n\nReturns\n-------\ntuple[np.ndarray, np.ndarray]\n    Aligned series."
            }
          ],
          "properties": []
        }
      ],
      "functions": [
        {
          "name": "linear_prediction",
          "args": [
            {
              "name": "values",
              "type": "normal",
              "annotation": "np.ndarray",
              "default": null
            },
            {
              "name": "timestamps",
              "type": "normal",
              "annotation": "np.ndarray",
              "default": null
            },
            {
              "name": "steps_ahead",
              "type": "normal",
              "annotation": "int",
              "default": null
            }
          ],
          "returns": "dict[str, Any]",
          "decorators": [],
          "is_async": false,
          "docstring": "Provide a convenient wrapper for linear prediction."
        },
        {
          "name": "exponential_smoothing",
          "args": [
            {
              "name": "values",
              "type": "normal",
              "annotation": "np.ndarray",
              "default": null
            },
            {
              "name": "steps_ahead",
              "type": "normal",
              "annotation": "int",
              "default": null
            },
            {
              "name": "alpha",
              "type": "normal",
              "annotation": "float",
              "default": null
            }
          ],
          "returns": "dict[str, Any]",
          "decorators": [],
          "is_async": false,
          "docstring": "Provide a convenient wrapper for exponential smoothing."
        },
        {
          "name": "mann_kendall_test",
          "args": [
            {
              "name": "values",
              "type": "normal",
              "annotation": "np.ndarray",
              "default": null
            }
          ],
          "returns": "dict[str, Any]",
          "decorators": [],
          "is_async": false,
          "docstring": "Provide a convenient wrapper for Mann-Kendall test."
        },
        {
          "name": "correlation_analysis",
          "args": [
            {
              "name": "series1",
              "type": "normal",
              "annotation": "np.ndarray",
              "default": null
            },
            {
              "name": "series2",
              "type": "normal",
              "annotation": "np.ndarray",
              "default": null
            },
            {
              "name": "significance_level",
              "type": "normal",
              "annotation": "float",
              "default": null
            }
          ],
          "returns": "dict[str, Any]",
          "decorators": [],
          "is_async": false,
          "docstring": "Provide a convenient wrapper for correlation analysis."
        },
        {
          "name": "align_time_series",
          "args": [
            {
              "name": "series1",
              "type": "normal",
              "annotation": "np.ndarray",
              "default": null
            },
            {
              "name": "timestamps1",
              "type": "normal",
              "annotation": "np.ndarray",
              "default": null
            },
            {
              "name": "series2",
              "type": "normal",
              "annotation": "np.ndarray",
              "default": null
            },
            {
              "name": "timestamps2",
              "type": "normal",
              "annotation": "np.ndarray",
              "default": null
            },
            {
              "name": "tolerance",
              "type": "normal",
              "annotation": "float",
              "default": null
            }
          ],
          "returns": "tuple[np.ndarray, np.ndarray]",
          "decorators": [],
          "is_async": false,
          "docstring": "Provide a convenient wrapper for aligning time series."
        }
      ],
      "global_vars": [
        {
          "name": "logger",
          "annotation": null
        }
      ]
    }
  }
}