LangChain - Evaluation and Testing
What will this custom metric return when evaluating metric.evaluate(['a', 'b', 'c'], ['a', 'b', 'd'])?

class CountErrorsMetric(BaseEvalMetric):
    def evaluate(self, predictions, references):
        errors = sum(p != r for p, r in zip(predictions, references))
        return errors
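Answer: 1. zip pairs the two lists element by element; p != r is True only for the third pair ('c' vs 'd'), and sum counts each True as 1 and each False as 0, so exactly one mismatch is counted. (Note that zip stops at the shorter input, so with lists of unequal length any extra items would be silently ignored.)

Below is a minimal, self-contained sketch to verify this. BaseEvalMetric is treated as a hypothetical stub base class here (it is not assumed to be a documented LangChain API); it is defined only so the example runs as-is:

class BaseEvalMetric:
    # Hypothetical stub: defined purely so this example is runnable;
    # not a documented LangChain class.
    pass

class CountErrorsMetric(BaseEvalMetric):
    def evaluate(self, predictions, references):
        # Pair each prediction with its reference and count positions
        # that disagree; True/False from p != r sum as 1/0.
        errors = sum(p != r for p, r in zip(predictions, references))
        return errors

metric = CountErrorsMetric()
print(metric.evaluate(['a', 'b', 'c'], ['a', 'b', 'd']))  # prints 1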
