This module is responsible for evaluating rules.
  
Evaluation

Evaluate a set of rules on a set of nodes.

Source code in src/dbt_score/evaluation.py
```python
class Evaluation:
    """Evaluate a set of rules on a set of nodes."""

    def __init__(
        self,
        rule_registry: RuleRegistry,
        manifest_loader: ManifestLoader,
        formatter: Formatter,
        scorer: Scorer,
        config: Config,
    ) -> None:
        """Create an Evaluation object.

        Args:
            rule_registry: A rule registry to access rules.
            manifest_loader: A manifest loader to access dbt metadata.
            formatter: A formatter to display results.
            scorer: A scorer to compute scores.
            config: A configuration.
        """
        self._rule_registry = rule_registry
        self._manifest_loader = manifest_loader
        self._formatter = formatter
        self._scorer = scorer
        self._config = config

        # For each evaluable, its results
        self.results: dict[Evaluable, EvaluableResultsType] = {}
        # For each evaluable, its computed score
        self.scores: dict[Evaluable, Score] = {}
        # The aggregated project score
        self.project_score: Score

    def evaluate(self) -> None:
        """Evaluate all rules."""
        rules = self._rule_registry.rules.values()

        for evaluable in chain(
            self._manifest_loader.models.values(),
            self._manifest_loader.sources.values(),
            self._manifest_loader.snapshots.values(),
            self._manifest_loader.exposures.values(),
            self._manifest_loader.seeds.values(),
        ):
            # Type inference on elements from `chain` is wonky and resolves
            # to the superclass HasColumnsMixin, hence the explicit cast.
            evaluable = cast(Evaluable, evaluable)
            self.results[evaluable] = {}
            for rule in rules:
                try:
                    if rule.should_evaluate(evaluable):
                        result = rule.evaluate(evaluable, **rule.config)
                        self.results[evaluable][rule.__class__] = result
                except Exception as e:
                    if self._config.debug:
                        traceback.print_exc()
                        pdb.post_mortem()
                    self.results[evaluable][rule.__class__] = e

            self.scores[evaluable] = self._scorer.score_evaluable(
                self.results[evaluable]
            )
            self._formatter.evaluable_evaluated(
                evaluable, self.results[evaluable], self.scores[evaluable]
            )

        # Compute the aggregated score for the project
        self.project_score = self._scorer.score_aggregate_evaluables(
            list(self.scores.values())
        )

        # Only report the project score when at least one evaluable was found
        if (
            self._manifest_loader.models
            or self._manifest_loader.sources
            or self._manifest_loader.snapshots
            or self._manifest_loader.exposures
            or self._manifest_loader.seeds
        ):
            self._formatter.project_evaluated(self.project_score)
```
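For orientation, here is a hedged usage sketch built only on the constructor signature and `evaluate()` shown above. It assumes the five collaborators (rule registry, manifest loader, formatter, scorer, config) have already been constructed by the calling code, since their real constructors live in other dbt_score modules.

```python
# Hedged usage sketch: only `Evaluation` comes from this module. The five
# collaborators are passed in already constructed; building them is the
# responsibility of the calling code and is not shown here.
from dbt_score.evaluation import Evaluation


def run_evaluation(rule_registry, manifest_loader, formatter, scorer, config):
    """Evaluate every model/source/snapshot/exposure/seed and report scores."""
    evaluation = Evaluation(
        rule_registry=rule_registry,
        manifest_loader=manifest_loader,
        formatter=formatter,
        scorer=scorer,
        config=config,
    )
    evaluation.evaluate()

    # After evaluate(), the per-evaluable scores and the aggregated project
    # score are available as attributes.
    for evaluable, score in evaluation.scores.items():
        print(evaluable, score)
    return evaluation.project_score
```

Reporting itself is delegated to the formatter during `evaluate()`; reading `scores` and `project_score` afterwards, as above, is only needed if the caller wants to act on the results programmatically.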
 
               
  
__init__(rule_registry, manifest_loader, formatter, scorer, config)

Create an Evaluation object.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| rule_registry | RuleRegistry | A rule registry to access rules. | required |
| manifest_loader | ManifestLoader | A manifest loader to access dbt metadata. | required |
| formatter | Formatter | A formatter to display results. | required |
| scorer | Scorer | A scorer to compute scores. | required |
| config | Config | A configuration. | required |
            
Source code in src/dbt_score/evaluation.py

```python
def __init__(
    self,
    rule_registry: RuleRegistry,
    manifest_loader: ManifestLoader,
    formatter: Formatter,
    scorer: Scorer,
    config: Config,
) -> None:
    """Create an Evaluation object.

    Args:
        rule_registry: A rule registry to access rules.
        manifest_loader: A manifest loader to access dbt metadata.
        formatter: A formatter to display results.
        scorer: A scorer to compute scores.
        config: A configuration.
    """
    self._rule_registry = rule_registry
    self._manifest_loader = manifest_loader
    self._formatter = formatter
    self._scorer = scorer
    self._config = config

    # For each evaluable, its results
    self.results: dict[Evaluable, EvaluableResultsType] = {}
    # For each evaluable, its computed score
    self.scores: dict[Evaluable, Score] = {}
    # The aggregated project score
    self.project_score: Score
```
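To make the container annotations above concrete, the sketch below shows the shape `results` and `scores` take after `evaluate()` has run. The `Fake*` classes are illustrative assumptions standing in for dbt_score's real `Evaluable`, `Rule` and `Score` types.

```python
# Illustrative stand-ins only: dbt_score's real Evaluable, Rule and Score
# types live in other modules; this just mirrors the dict shapes above.
from dataclasses import dataclass


@dataclass(frozen=True)
class FakeEvaluable:
    unique_id: str


@dataclass(frozen=True)
class FakeScore:
    value: float


class FakeRule:
    """Stands in for a Rule subclass; `results` is keyed by the rule's class."""


orders = FakeEvaluable("model.my_project.orders")

# results: for each evaluable, a mapping from rule class to whatever the
# rule's evaluate() returned, or the exception it raised.
results = {orders: {FakeRule: None}}

# scores: for each evaluable, the score computed by the scorer.
scores = {orders: FakeScore(10.0)}
```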
 
             
     
 
evaluate()

Evaluate all rules.

Source code in src/dbt_score/evaluation.py
```python
def evaluate(self) -> None:
    """Evaluate all rules."""
    rules = self._rule_registry.rules.values()

    for evaluable in chain(
        self._manifest_loader.models.values(),
        self._manifest_loader.sources.values(),
        self._manifest_loader.snapshots.values(),
        self._manifest_loader.exposures.values(),
        self._manifest_loader.seeds.values(),
    ):
        # Type inference on elements from `chain` is wonky and resolves
        # to the superclass HasColumnsMixin, hence the explicit cast.
        evaluable = cast(Evaluable, evaluable)
        self.results[evaluable] = {}
        for rule in rules:
            try:
                if rule.should_evaluate(evaluable):
                    result = rule.evaluate(evaluable, **rule.config)
                    self.results[evaluable][rule.__class__] = result
            except Exception as e:
                if self._config.debug:
                    traceback.print_exc()
                    pdb.post_mortem()
                self.results[evaluable][rule.__class__] = e

        self.scores[evaluable] = self._scorer.score_evaluable(
            self.results[evaluable]
        )
        self._formatter.evaluable_evaluated(
            evaluable, self.results[evaluable], self.scores[evaluable]
        )

    # Compute the aggregated score for the project
    self.project_score = self._scorer.score_aggregate_evaluables(
        list(self.scores.values())
    )

    # Only report the project score when at least one evaluable was found
    if (
        self._manifest_loader.models
        or self._manifest_loader.sources
        or self._manifest_loader.snapshots
        or self._manifest_loader.exposures
        or self._manifest_loader.seeds
    ):
        self._formatter.project_evaluated(self.project_score)
```
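Since this method couples several concerns (iteration over heterogeneous collections, per-rule error handling, scoring, reporting), a small self-contained sketch may help. It mirrors the control flow above with toy stand-ins rather than dbt_score's real Rule, Scorer and Formatter types, and the scoring logic used here is an assumption purely for illustration.

```python
# Toy reproduction of the control flow above: chain over several collections,
# per-rule try/except, exceptions stored as results, aggregate only when
# something was evaluated. Not dbt_score's real types.
from itertools import chain


class ToyRule:
    """Simplified stand-in for a dbt_score rule."""

    config: dict = {}

    def should_evaluate(self, evaluable: str) -> bool:
        return True

    def evaluate(self, evaluable: str, **config) -> str | None:
        """Return a violation message, or None when the rule passes."""
        if "bad" in evaluable:
            raise ValueError(f"cannot evaluate {evaluable}")
        return None


def toy_evaluate(models: dict[str, str], sources: dict[str, str]) -> None:
    rules = [ToyRule()]
    results: dict[str, dict[type, object]] = {}
    scores: dict[str, float] = {}

    # Walk every evaluable from all collections, like the chain() above.
    for evaluable in chain(models.values(), sources.values()):
        results[evaluable] = {}
        for rule in rules:
            try:
                if rule.should_evaluate(evaluable):
                    results[evaluable][rule.__class__] = rule.evaluate(
                        evaluable, **rule.config
                    )
            except Exception as exc:
                # A failing rule becomes part of the results instead of
                # aborting the run, mirroring the try/except above.
                results[evaluable][rule.__class__] = exc

        # Toy scoring (assumption): full marks only without violations/errors.
        passed = all(v is None for v in results[evaluable].values())
        scores[evaluable] = 10.0 if passed else 0.0

    # Aggregate and report only when at least one evaluable was seen.
    if models or sources:
        print("project score:", sum(scores.values()) / len(scores))


toy_evaluate({"m1": "orders"}, {"s1": "bad_source"})
```

Note how a raised exception is stored in place of a result, so one broken rule affects only the evaluables it touches without stopping the whole run, and how the project score is only reported when something was actually evaluated.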