Skip to content

Analysis

uncertainty_flow.analysis

Feature leverage analysis for uncertainty attribution.

FeatureLeverageAnalyzer

Analyze which features most influence prediction uncertainty.

Identifies leverage features by decomposing uncertainty into aleatoric (irreducible) and epistemic (reducible) components and computing how much each feature contributes to prediction interval width.

Parameters

model : BaseUncertaintyModel
    Fitted uncertainty model with a predict() method.
confidence : float, default=0.9
    Confidence level for prediction intervals.
n_perturbations : int, default=100
    Number of perturbation samples for leverage estimation.
n_bins : int, default=10
    Number of quantile bins for conditional decomposition.
leverage_threshold : float, default=0.5
    Threshold for "high leverage" classification.
random_state : int, optional
    Random seed for reproducibility.

Examples

import polars as pl
from uncertainty_flow.models import QuantileForestForecaster
from uncertainty_flow.analysis import FeatureLeverageAnalyzer

Train forecaster

model = QuantileForestForecaster(targets="demand", horizon=7)
model.fit(train_data)

Identify leverage features

analyzer = FeatureLeverageAnalyzer(model)
report = analyzer.analyze(test_data)

Filter to high-leverage features

high_leverage = report.filter(pl.col("leverage_score") > 0.5)
print(high_leverage["feature"].to_list())

Source code in uncertainty_flow/analysis/leverage.py
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
class FeatureLeverageAnalyzer:
    """
    Analyze which features most influence prediction uncertainty.

    Identifies leverage features by decomposing uncertainty into aleatoric
    (irreducible) and epistemic (reducible) components and computing how
    much each feature contributes to prediction interval width.

    Parameters
    ----------
    model : BaseUncertaintyModel
        Fitted uncertainty model with predict() method
    confidence : float, default=0.9
        Confidence level for prediction intervals
    n_perturbations : int, default=100
        Number of perturbation samples for leverage estimation
    n_bins : int, default=10
        Number of quantile bins for conditional decomposition
    leverage_threshold : float, default=0.5
        Threshold for "high leverage" classification
    random_state : int, optional
        Random seed for reproducibility

    Examples
    --------
    >>> import polars as pl
    >>> from uncertainty_flow.models import QuantileForestForecaster
    >>> from uncertainty_flow.analysis import FeatureLeverageAnalyzer
    >>>
    >>> # Train forecaster
    >>> model = QuantileForestForecaster(targets="demand", horizon=7)
    >>> model.fit(train_data)
    >>>
    >>> # Identify leverage features
    >>> analyzer = FeatureLeverageAnalyzer(model)
    >>> report = analyzer.analyze(test_data)
    >>>
    >>> # Filter to high-leverage features
    >>> high_leverage = report.filter(pl.col("leverage_score") > 0.5)
    >>> print(high_leverage["feature"].to_list())
    """

    def __init__(
        self,
        model: "BaseUncertaintyModel",
        confidence: float = 0.9,
        n_perturbations: int = 100,
        n_bins: int = 10,
        leverage_threshold: float = 0.5,
        random_state: int | None = None,
    ):
        self.model = model
        self.confidence = confidence
        self.n_perturbations = n_perturbations
        self.n_bins = n_bins
        self.leverage_threshold = leverage_threshold
        self.random_state = random_state
        self._rng = np.random.default_rng(random_state)
        # Internal cap on total perturbed prediction rows per feature;
        # keeps batched predict() calls cheap on large evaluation frames.
        self._prediction_row_budget = 800

    def analyze(
        self,
        data: pl.DataFrame,
    ) -> pl.DataFrame:
        """
        Analyze feature leverage on prediction uncertainty.

        Computes leverage scores, aleatoric/epistemic decomposition,
        and actionable recommendations for each feature.

        Args:
            data: Feature DataFrame for leverage analysis

        Returns:
            Polars DataFrame with columns:
                - feature: Feature name
                - aleatoric_score: Irreducible uncertainty contribution
                - epistemic_score: Reducible uncertainty contribution
                - leverage_score: Total impact on prediction intervals
                - recommendation: Actionable insight

        Raises:
            InvalidDataError: If data is empty or contains invalid data
        """
        from ..utils.exceptions import InvalidDataError

        if data.height == 0:
            raise InvalidDataError("Cannot analyze leverage on empty DataFrame")

        baseline_pred = self.model.predict(data)
        baseline_width_matrix = _interval_width_matrix(baseline_pred, self.confidence)
        # Univariate view: only the first target's widths are used here.
        # Multivariate models get per-target scores via analyze_multivariate().
        baseline_width = baseline_width_matrix[:, 0]
        n_repeats = self._effective_perturbation_count(data.height)

        results = []

        for feature_name in data.columns:
            feature_vals = data[feature_name].to_numpy()

            # Skip constant features: permutation is a no-op for them.
            if np.std(feature_vals) == 0:
                continue

            # Compute leverage score via permutation
            perturbed_width_stack, _ = self._predict_perturbation_effects(
                data, feature_name, n_repeats
            )
            leverage_score = self._compute_permutation_leverage(
                baseline_width,
                perturbed_width_stack[:, :, 0],
            )

            # Compute aleatoric/epistemic decomposition
            aleatoric_score, epistemic_score = self._compute_decomposition(
                feature_vals, baseline_width
            )

            # Generate recommendation
            rec_type = _generate_recommendation(
                aleatoric_score, epistemic_score, leverage_score, self.leverage_threshold
            )
            recommendation = _format_recommendation(rec_type)

            results.append(
                {
                    "feature": feature_name,
                    "aleatoric_score": float(aleatoric_score),
                    "epistemic_score": float(epistemic_score),
                    "leverage_score": float(leverage_score),
                    "recommendation": recommendation,
                }
            )

        # All features constant (or none): return a typed empty frame so
        # downstream filters/sorts still see the expected schema.
        if not results:
            return pl.DataFrame(
                schema={
                    "feature": pl.String,
                    "aleatoric_score": pl.Float64,
                    "epistemic_score": pl.Float64,
                    "leverage_score": pl.Float64,
                    "recommendation": pl.String,
                }
            )

        df = pl.DataFrame(results)
        return df.sort("leverage_score", descending=True)

    def _compute_permutation_leverage(
        self,
        baseline_width: np.ndarray,
        perturbed_widths: np.ndarray,
    ) -> float:
        """
        Compute leverage score via feature permutation.

        Measures how much prediction interval width changes when
        the feature is randomly permuted (breaking its relationship
        with the target).

        Args:
            baseline_width: Baseline prediction interval widths
            perturbed_widths: Interval widths with shape (n_perturbations, n_rows)

        Returns:
            Leverage score (mean absolute change in interval width)
        """
        width_deltas = np.abs(perturbed_widths - baseline_width.reshape(1, -1))
        return float(width_deltas.mean())

    def _effective_perturbation_count(self, n_rows: int) -> int:
        """
        Return a bounded perturbation count based on requested repeats and frame size.

        The analyzer keeps total perturbed prediction rows within a lightweight budget
        so leverage analysis stays practical on larger evaluation frames.
        """
        if n_rows <= 0:
            return 1

        max_repeats_by_rows = max(1, self._prediction_row_budget // n_rows)
        return max(1, min(self.n_perturbations, max_repeats_by_rows))

    def _predict_perturbation_effects(
        self,
        data: pl.DataFrame,
        feature_name: str,
        n_repeats: int,
    ) -> tuple[np.ndarray, np.ndarray]:
        """Predict all perturbation repeats for one feature in a single batched call."""
        if n_repeats <= 0:
            raise ValueError("n_repeats must be positive")

        frames = []
        feature_vals = data[feature_name].to_numpy()

        for _ in range(n_repeats):
            permuted_vals = self._rng.permutation(feature_vals)
            frames.append(data.with_columns(pl.Series(feature_name, permuted_vals)))

        # One batched predict() over all repeats instead of n_repeats calls.
        batched = pl.concat(frames, rechunk=False)
        perturbed_pred = self.model.predict(batched)
        width_matrix = _interval_width_matrix(perturbed_pred, self.confidence)
        point_matrix = _point_matrix(perturbed_pred)

        n_rows = data.height
        n_targets = width_matrix.shape[1]
        reshaped_widths = width_matrix.reshape(n_repeats, n_rows, n_targets)
        reshaped_points = point_matrix.reshape(n_repeats, n_rows, point_matrix.shape[1])
        return reshaped_widths, reshaped_points

    def _compute_joint_leverage(
        self,
        baseline_width_matrix: np.ndarray,
        baseline_point_matrix: np.ndarray,
        perturbed_width_stack: np.ndarray,
        perturbed_point_stack: np.ndarray,
    ) -> tuple[float, float]:
        """Compute joint leverage and dependence shift across targets."""
        baseline_joint_scale = _joint_interval_scale(baseline_width_matrix)
        perturbed_joint_scales = np.stack(
            [_joint_interval_scale(width_matrix) for width_matrix in perturbed_width_stack],
            axis=0,
        )
        volume_shift = np.abs(perturbed_joint_scales - baseline_joint_scale.reshape(1, -1)).mean()

        # Dependence shift: how much the rank-correlation structure between
        # targets moves when the feature is permuted.
        baseline_corr = _rank_correlation_matrix(baseline_point_matrix)
        dependence_shifts = []
        for point_matrix in perturbed_point_stack:
            corr_delta = _rank_correlation_matrix(point_matrix) - baseline_corr
            dependence_shifts.append(_mean_upper_triangle_abs(corr_delta))

        dependence_shift = float(np.mean(dependence_shifts)) if dependence_shifts else 0.0
        return float(volume_shift + dependence_shift), dependence_shift

    def _compute_decomposition(
        self,
        feature_vals: np.ndarray,
        baseline_width: np.ndarray,
    ) -> tuple[float, float]:
        """
        Decompose uncertainty into aleatoric and epistemic components.

        Uses conditional variance decomposition:
        - Aleatoric: Mean of within-group variance (noise within bins)
        - Epistemic: Variance of between-group means (model uncertainty)

        Args:
            feature_vals: Feature values
            baseline_width: Prediction interval widths

        Returns:
            Tuple of (aleatoric_score, epistemic_score) as Python floats
        """
        # Bin feature values into quantiles
        try:
            # Create Polars Series for qcut
            feature_series = pl.Series(feature_vals)
            binned = feature_series.qcut(self.n_bins, allow_duplicates=True)
            bin_labels = binned.to_numpy()
        except Exception:
            # Deliberate best-effort fallback: qcut can fail on degenerate
            # distributions (e.g. heavy ties). Catching Exception alone is
            # intentional — the previous tuple also named ValueError and
            # pl.ColumnNotFoundError, which Exception already subsumes, and
            # referencing pl inside the except clause could itself raise.
            bin_edges = np.linspace(np.min(feature_vals), np.max(feature_vals), self.n_bins + 1)
            bin_labels = np.digitize(feature_vals, bin_edges[:-1])

        # Compute within-group and between-group variance
        unique_bins = np.unique(bin_labels)

        if len(unique_bins) <= 1:
            # Only one bin, cannot decompose
            return 0.0, 0.0

        within_group_vars = []
        between_group_means = []

        for bin_label in unique_bins:
            bin_mask = bin_labels == bin_label
            bin_widths = baseline_width[bin_mask]

            # Singleton bins carry no variance information; skip them.
            if len(bin_widths) > 1:
                within_group_vars.append(np.var(bin_widths))
                between_group_means.append(np.mean(bin_widths))

        if not within_group_vars:
            return 0.0, 0.0

        # Aleatoric: mean of within-group variances
        aleatoric_score = np.mean(within_group_vars)

        # Epistemic: variance of between-group means
        epistemic_score = np.var(between_group_means) if len(between_group_means) > 1 else 0.0

        # Cast np.float64 -> float to honor the declared return type.
        return float(aleatoric_score), float(epistemic_score)

    def analyze_multivariate(
        self,
        data: pl.DataFrame,
    ) -> pl.DataFrame:
        """
        Analyze feature leverage for multivariate forecasting models.

        Extends leverage analysis to multiple targets, computing
        per-target and joint leverage scores.

        Args:
            data: Feature DataFrame for leverage analysis

        Returns:
            Polars DataFrame with columns:
                - feature: Feature name
                - target: Target name (or "joint" for multivariate impact)
                - aleatoric_score: Irreducible uncertainty contribution
                - epistemic_score: Reducible uncertainty contribution
                - leverage_score: Total impact on prediction intervals
                - dependence_shift: Rank-dependence change across targets
                - recommendation: Actionable insight
        """
        pred = self.model.predict(data)

        if len(pred._targets) <= 1:
            # Not multivariate, fall back to standard analysis
            return self.analyze(data)

        baseline_width_matrix = _interval_width_matrix(pred, self.confidence)
        baseline_point_matrix = _point_matrix(pred)
        target_names = list(pred._targets)
        n_repeats = self._effective_perturbation_count(data.height)
        results = []

        for feature_name in data.columns:
            feature_vals = data[feature_name].to_numpy()
            if np.std(feature_vals) == 0:
                continue

            perturbed_width_stack, perturbed_point_stack = self._predict_perturbation_effects(
                data,
                feature_name,
                n_repeats,
            )

            # Per-target rows: leverage computed on each target's own widths.
            for target_idx, target_name in enumerate(target_names):
                baseline_width = baseline_width_matrix[:, target_idx]
                leverage_score = self._compute_permutation_leverage(
                    baseline_width,
                    perturbed_width_stack[:, :, target_idx],
                )
                aleatoric_score, epistemic_score = self._compute_decomposition(
                    feature_vals,
                    baseline_width,
                )
                recommendation = _format_recommendation(
                    _generate_recommendation(
                        aleatoric_score,
                        epistemic_score,
                        leverage_score,
                        self.leverage_threshold,
                    )
                )
                results.append(
                    {
                        "feature": feature_name,
                        "target": target_name,
                        "aleatoric_score": float(aleatoric_score),
                        "epistemic_score": float(epistemic_score),
                        "leverage_score": float(leverage_score),
                        # Dependence shift is a cross-target quantity;
                        # only the "joint" row carries a non-zero value.
                        "dependence_shift": 0.0,
                        "recommendation": recommendation,
                    }
                )

            # Joint row: combined interval volume plus dependence shift.
            joint_width = _joint_interval_scale(baseline_width_matrix)
            joint_leverage, dependence_shift = self._compute_joint_leverage(
                baseline_width_matrix,
                baseline_point_matrix,
                perturbed_width_stack,
                perturbed_point_stack,
            )
            joint_aleatoric, joint_epistemic = self._compute_decomposition(
                feature_vals,
                joint_width,
            )
            joint_recommendation = _format_recommendation(
                _generate_recommendation(
                    joint_aleatoric,
                    joint_epistemic,
                    joint_leverage,
                    self.leverage_threshold,
                )
            )
            results.append(
                {
                    "feature": feature_name,
                    "target": "joint",
                    "aleatoric_score": float(joint_aleatoric),
                    "epistemic_score": float(joint_epistemic),
                    "leverage_score": float(joint_leverage),
                    "dependence_shift": float(dependence_shift),
                    "recommendation": joint_recommendation,
                }
            )

        if results:
            return pl.DataFrame(results).sort(
                ["target", "leverage_score"],
                descending=[False, True],
            )

        # No analyzable features: return a typed empty frame.
        return pl.DataFrame(
            schema={
                "feature": pl.String,
                "target": pl.String,
                "aleatoric_score": pl.Float64,
                "epistemic_score": pl.Float64,
                "leverage_score": pl.Float64,
                "dependence_shift": pl.Float64,
                "recommendation": pl.String,
            }
        )

    def summary(self) -> dict[str, Any]:
        """
        Return summary of the analyzer configuration.

        Returns:
            Dictionary with analyzer configuration
        """
        return {
            "confidence": self.confidence,
            "n_perturbations": self.n_perturbations,
            "n_bins": self.n_bins,
            "leverage_threshold": self.leverage_threshold,
            "random_state": self.random_state,
            "effective_prediction_row_budget": self._prediction_row_budget,
        }

analyze(data)

Analyze feature leverage on prediction uncertainty.

Computes leverage scores, aleatoric/epistemic decomposition, and actionable recommendations for each feature.

Parameters:

Name Type Description Default
data DataFrame

Feature DataFrame for leverage analysis

required

Returns:

Type Description
DataFrame

Polars DataFrame with columns: - feature: Feature name - aleatoric_score: Irreducible uncertainty contribution - epistemic_score: Reducible uncertainty contribution - leverage_score: Total impact on prediction intervals - recommendation: Actionable insight

Raises:

Type Description
InvalidDataError

If data is empty or contains invalid data

Source code in uncertainty_flow/analysis/leverage.py
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
def analyze(
    self,
    data: pl.DataFrame,
) -> pl.DataFrame:
    """
    Analyze feature leverage on prediction uncertainty.

    Computes leverage scores, aleatoric/epistemic decomposition,
    and actionable recommendations for each feature.

    Args:
        data: Feature DataFrame for leverage analysis

    Returns:
        Polars DataFrame with columns:
            - feature: Feature name
            - aleatoric_score: Irreducible uncertainty contribution
            - epistemic_score: Reducible uncertainty contribution
            - leverage_score: Total impact on prediction intervals
            - recommendation: Actionable insight

    Raises:
        InvalidDataError: If data is empty or contains invalid data
    """
    from ..utils.exceptions import InvalidDataError

    if data.height == 0:
        raise InvalidDataError("Cannot analyze leverage on empty DataFrame")

    baseline_pred = self.model.predict(data)
    baseline_width_matrix = _interval_width_matrix(baseline_pred, self.confidence)
    # NOTE: only the first target's interval widths are used here;
    # multivariate models get per-target scores from analyze_multivariate().
    baseline_width = baseline_width_matrix[:, 0]
    # Repeats are capped by an internal prediction-row budget.
    n_repeats = self._effective_perturbation_count(data.height)

    results = []

    for feature_name in data.columns:
        feature_vals = data[feature_name].to_numpy()

        # Skip constant features
        if np.std(feature_vals) == 0:
            continue

        # Compute leverage score via permutation
        perturbed_width_stack, _ = self._predict_perturbation_effects(
            data, feature_name, n_repeats
        )
        leverage_score = self._compute_permutation_leverage(
            baseline_width,
            perturbed_width_stack[:, :, 0],
        )

        # Compute aleatoric/epistemic decomposition
        aleatoric_score, epistemic_score = self._compute_decomposition(
            feature_vals, baseline_width
        )

        # Generate recommendation
        rec_type = _generate_recommendation(
            aleatoric_score, epistemic_score, leverage_score, self.leverage_threshold
        )
        recommendation = _format_recommendation(rec_type)

        results.append(
            {
                "feature": feature_name,
                "aleatoric_score": float(aleatoric_score),
                "epistemic_score": float(epistemic_score),
                "leverage_score": float(leverage_score),
                "recommendation": recommendation,
            }
        )

    # No analyzable (non-constant) features: return a typed empty frame
    # so downstream filters/sorts still see the expected schema.
    if not results:
        return pl.DataFrame(
            schema={
                "feature": pl.String,
                "aleatoric_score": pl.Float64,
                "epistemic_score": pl.Float64,
                "leverage_score": pl.Float64,
                "recommendation": pl.String,
            }
        )

    df = pl.DataFrame(results)
    return df.sort("leverage_score", descending=True)

analyze_multivariate(data)

Analyze feature leverage for multivariate forecasting models.

Extends leverage analysis to multiple targets, computing per-target and joint leverage scores.

Parameters:

Name Type Description Default
data DataFrame

Feature DataFrame for leverage analysis

required

Returns:

Type Description
DataFrame

Polars DataFrame with columns: - feature: Feature name - target: Target name (or "joint" for multivariate impact) - aleatoric_score: Irreducible uncertainty contribution - epistemic_score: Reducible uncertainty contribution - leverage_score: Total impact on prediction intervals - dependence_shift: Rank-dependence change across targets - recommendation: Actionable insight

Source code in uncertainty_flow/analysis/leverage.py
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
def analyze_multivariate(
    self,
    data: pl.DataFrame,
) -> pl.DataFrame:
    """
    Analyze feature leverage for multivariate forecasting models.

    Extends leverage analysis to multiple targets, computing
    per-target and joint leverage scores.

    Args:
        data: Feature DataFrame for leverage analysis

    Returns:
        Polars DataFrame with columns:
            - feature: Feature name
            - target: Target name (or "joint" for multivariate impact)
            - aleatoric_score: Irreducible uncertainty contribution
            - epistemic_score: Reducible uncertainty contribution
            - leverage_score: Total impact on prediction intervals
            - dependence_shift: Rank-dependence change across targets
            - recommendation: Actionable insight
    """
    pred = self.model.predict(data)

    if len(pred._targets) <= 1:
        # Not multivariate, fall back to standard analysis
        return self.analyze(data)

    baseline_width_matrix = _interval_width_matrix(pred, self.confidence)
    baseline_point_matrix = _point_matrix(pred)
    target_names = list(pred._targets)
    # Repeats are capped by an internal prediction-row budget.
    n_repeats = self._effective_perturbation_count(data.height)
    results = []

    for feature_name in data.columns:
        feature_vals = data[feature_name].to_numpy()
        # Skip constant features: permutation is a no-op for them.
        if np.std(feature_vals) == 0:
            continue

        perturbed_width_stack, perturbed_point_stack = self._predict_perturbation_effects(
            data,
            feature_name,
            n_repeats,
        )

        # Per-target rows: leverage computed on each target's own widths.
        for target_idx, target_name in enumerate(target_names):
            baseline_width = baseline_width_matrix[:, target_idx]
            leverage_score = self._compute_permutation_leverage(
                baseline_width,
                perturbed_width_stack[:, :, target_idx],
            )
            aleatoric_score, epistemic_score = self._compute_decomposition(
                feature_vals,
                baseline_width,
            )
            recommendation = _format_recommendation(
                _generate_recommendation(
                    aleatoric_score,
                    epistemic_score,
                    leverage_score,
                    self.leverage_threshold,
                )
            )
            results.append(
                {
                    "feature": feature_name,
                    "target": target_name,
                    "aleatoric_score": float(aleatoric_score),
                    "epistemic_score": float(epistemic_score),
                    "leverage_score": float(leverage_score),
                    # Cross-target quantity; only the "joint" row is non-zero.
                    "dependence_shift": 0.0,
                    "recommendation": recommendation,
                }
            )

        # Joint row: combined interval scale plus rank-dependence shift.
        joint_width = _joint_interval_scale(baseline_width_matrix)
        joint_leverage, dependence_shift = self._compute_joint_leverage(
            baseline_width_matrix,
            baseline_point_matrix,
            perturbed_width_stack,
            perturbed_point_stack,
        )
        joint_aleatoric, joint_epistemic = self._compute_decomposition(
            feature_vals,
            joint_width,
        )
        joint_recommendation = _format_recommendation(
            _generate_recommendation(
                joint_aleatoric,
                joint_epistemic,
                joint_leverage,
                self.leverage_threshold,
            )
        )
        results.append(
            {
                "feature": feature_name,
                "target": "joint",
                "aleatoric_score": float(joint_aleatoric),
                "epistemic_score": float(joint_epistemic),
                "leverage_score": float(joint_leverage),
                "dependence_shift": float(dependence_shift),
                "recommendation": joint_recommendation,
            }
        )

    if results:
        return pl.DataFrame(results).sort(
            ["target", "leverage_score"],
            descending=[False, True],
        )

    # No analyzable features: return a typed empty frame.
    return pl.DataFrame(
        schema={
            "feature": pl.String,
            "target": pl.String,
            "aleatoric_score": pl.Float64,
            "epistemic_score": pl.Float64,
            "leverage_score": pl.Float64,
            "dependence_shift": pl.Float64,
            "recommendation": pl.String,
        }
    )

summary()

Return summary of the analyzer configuration.

Returns:

Type Description
dict[str, Any]

Dictionary with analyzer configuration

Source code in uncertainty_flow/analysis/leverage.py
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
def summary(self) -> dict[str, Any]:
    """
    Return summary of the analyzer configuration.

    Returns:
        Dictionary with analyzer configuration
    """
    return {
        "confidence": self.confidence,
        "n_perturbations": self.n_perturbations,
        "n_bins": self.n_bins,
        "leverage_threshold": self.leverage_threshold,
        "random_state": self.random_state,
        # Internal cap on perturbed prediction rows per feature.
        "effective_prediction_row_budget": self._prediction_row_budget,
    }