Skip to content

Commit 74e4631

Browse files
Rename some metadata
1 parent 74d9d42 commit 74e4631

File tree

1 file changed

+12
-12
lines changed

1 file changed

+12
-12
lines changed

src/Libraries/Microsoft.Extensions.AI.Evaluation.Quality/RelevanceTruthAndCompletenessEvaluator.cs

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -171,9 +171,9 @@ await JsonOutputFixer.RepairJsonAsync(
171171
result.AddDiagnosticToAllMetrics(
172172
EvaluationDiagnostic.Error(
173173
$"""
174-
Failed to repair the following response from the model and parse scores for '{RelevanceMetricName}', '{TruthMetricName}' and '{CompletenessMetricName}'.:
175-
{evaluationResponseText}
176-
"""));
174+
Failed to repair the following response from the model and parse scores for '{RelevanceMetricName}', '{TruthMetricName}' and '{CompletenessMetricName}'.:
175+
{evaluationResponseText}
176+
"""));
177177
}
178178
else
179179
{
@@ -186,10 +186,10 @@ await JsonOutputFixer.RepairJsonAsync(
186186
result.AddDiagnosticToAllMetrics(
187187
EvaluationDiagnostic.Error(
188188
$"""
189-
Failed to repair the following response from the model and parse scores for '{RelevanceMetricName}', '{TruthMetricName}' and '{CompletenessMetricName}'.:
190-
{evaluationResponseText}
191-
{ex}
192-
"""));
189+
Failed to repair the following response from the model and parse scores for '{RelevanceMetricName}', '{TruthMetricName}' and '{CompletenessMetricName}'.:
190+
{evaluationResponseText}
191+
{ex}
192+
"""));
193193
}
194194
}
195195
}
@@ -211,28 +211,28 @@ void UpdateResult()
211211

212212
if (!string.IsNullOrWhiteSpace(evaluationResponse.ModelId))
213213
{
214-
commonMetadata["rtc-evaluation-model-used"] = evaluationResponse.ModelId!;
214+
commonMetadata["evaluation-model-used"] = evaluationResponse.ModelId!;
215215
}
216216

217217
if (evaluationResponse.Usage is UsageDetails usage)
218218
{
219219
if (usage.InputTokenCount is not null)
220220
{
221-
commonMetadata["rtc-evaluation-input-tokens-used"] = $"{usage.InputTokenCount}";
221+
commonMetadata["evaluation-input-tokens-used"] = $"{usage.InputTokenCount}";
222222
}
223223

224224
if (usage.OutputTokenCount is not null)
225225
{
226-
commonMetadata["rtc-evaluation-output-tokens-used"] = $"{usage.OutputTokenCount}";
226+
commonMetadata["evaluation-output-tokens-used"] = $"{usage.OutputTokenCount}";
227227
}
228228

229229
if (usage.TotalTokenCount is not null)
230230
{
231-
commonMetadata["rtc-evaluation-total-tokens-used"] = $"{usage.TotalTokenCount}";
231+
commonMetadata["evaluation-total-tokens-used"] = $"{usage.TotalTokenCount}";
232232
}
233233
}
234234

235-
commonMetadata["rtc-evaluation-duration"] = duration;
235+
commonMetadata["evaluation-duration"] = duration;
236236

237237
NumericMetric relevance = result.Get<NumericMetric>(RelevanceMetricName);
238238
relevance.Value = rating.Relevance;

0 commit comments

Comments (0)