class Evaluation:
    """Evaluate a set of rules on a set of nodes."""

    def __init__(
        self,
        rule_registry: RuleRegistry,
        manifest_loader: ManifestLoader,
        formatter: Formatter,
        scorer: Scorer,
        config: Config,
    ) -> None:
        """Create an Evaluation object.

        Args:
            rule_registry: A rule registry to access rules.
            manifest_loader: A manifest loader to access dbt metadata.
            formatter: A formatter to display results.
            scorer: A scorer to compute scores.
            config: A configuration.
        """
        self._rule_registry = rule_registry
        self._manifest_loader = manifest_loader
        self._formatter = formatter
        self._scorer = scorer
        self._config = config

        # Per-evaluable rule outcomes (result object, or the exception raised).
        self.results: dict[Evaluable, EvaluableResultsType] = {}
        # Per-evaluable score derived from its results.
        self.scores: dict[Evaluable, Score] = {}
        # Aggregate score over every evaluated node; set by `evaluate`.
        self.project_score: Score

    def evaluate(self) -> None:
        """Evaluate all rules."""
        all_rules = self._rule_registry.rules.values()

        for node in chain(
            self._manifest_loader.models, self._manifest_loader.sources
        ):
            # Elements coming out of `chain` are inferred as the
            # HasColumnsMixin superclass, so narrow the type explicitly.
            node = cast(Evaluable, node)
            node_results: EvaluableResultsType = {}
            self.results[node] = node_results

            for rule in all_rules:
                try:
                    if rule.should_evaluate(node):
                        node_results[rule.__class__] = rule.evaluate(
                            node, **rule.config
                        )
                except Exception as exc:
                    if self._config.debug:
                        traceback.print_exc()
                        pdb.post_mortem()
                    # A failing rule is recorded as its exception, not re-raised.
                    node_results[rule.__class__] = exc

            self.scores[node] = self._scorer.score_evaluable(node_results)
            self._formatter.evaluable_evaluated(
                node, node_results, self.scores[node]
            )

        self.project_score = self._scorer.score_aggregate_evaluables(
            list(self.scores.values())
        )

        # Only report a project-level score when there was something to score.
        if self._manifest_loader.models or self._manifest_loader.sources:
            self._formatter.project_evaluated(self.project_score)
def __init__(
    self,
    rule_registry: RuleRegistry,
    manifest_loader: ManifestLoader,
    formatter: Formatter,
    scorer: Scorer,
    config: Config,
) -> None:
    """Create an Evaluation object.

    Args:
        rule_registry: A rule registry to access rules.
        manifest_loader: A manifest loader to access dbt metadata.
        formatter: A formatter to display results.
        scorer: A scorer to compute scores.
        config: A configuration.
    """
    self._rule_registry = rule_registry
    self._manifest_loader = manifest_loader
    self._formatter = formatter
    self._scorer = scorer
    self._config = config

    # Per-evaluable rule outcomes (result object, or the exception raised).
    self.results: dict[Evaluable, EvaluableResultsType] = {}
    # Per-evaluable score derived from its results.
    self.scores: dict[Evaluable, Score] = {}
    # Aggregate score over every evaluated node; assigned later by `evaluate`.
    self.project_score: Score
def evaluate(self) -> None:
    """Evaluate all rules."""
    all_rules = self._rule_registry.rules.values()

    for node in chain(
        self._manifest_loader.models, self._manifest_loader.sources
    ):
        # Elements coming out of `chain` are inferred as the
        # HasColumnsMixin superclass, so narrow the type explicitly.
        node = cast(Evaluable, node)
        node_results: EvaluableResultsType = {}
        self.results[node] = node_results

        for rule in all_rules:
            try:
                if rule.should_evaluate(node):
                    node_results[rule.__class__] = rule.evaluate(
                        node, **rule.config
                    )
            except Exception as exc:
                if self._config.debug:
                    traceback.print_exc()
                    pdb.post_mortem()
                # A failing rule is recorded as its exception, not re-raised.
                node_results[rule.__class__] = exc

        self.scores[node] = self._scorer.score_evaluable(node_results)
        self._formatter.evaluable_evaluated(
            node, node_results, self.scores[node]
        )

    self.project_score = self._scorer.score_aggregate_evaluables(
        list(self.scores.values())
    )

    # Only report a project-level score when there was something to score.
    if self._manifest_loader.models or self._manifest_loader.sources:
        self._formatter.project_evaluated(self.project_score)