Agentic AI · C# · Verified
Evaluator-Optimizer Agent Pattern in C#
An iterative refinement loop where an 'Evaluator' provides granular feedback on an 'Optimizer’s' output until quality thresholds are met.
How to Implement the Evaluator-Optimizer Agent Pattern in C#
Step 1: Define the Feedback record and interfaces
/// <summary>
/// Evaluator verdict for one optimizer draft: a pass/fail flag, a textual
/// critique the optimizer can act on, and a numeric quality score.
/// </summary>
public record Feedback(bool IsPass, string Critique, double Score);
/// <summary>
/// Produces and iteratively refines candidate output for a task.
/// </summary>
public interface IOptimizer
{
/// <summary>Generates the first draft for <paramref name="task"/>.</summary>
Task<string> GenerateAsync(string task);
/// <summary>Revises <paramref name="current"/> using the evaluator's textual <paramref name="feedback"/>.</summary>
Task<string> RefineAsync(string current, string feedback);
}
/// <summary>
/// Judges an optimizer draft and returns structured <see cref="Feedback"/>.
/// </summary>
public interface IEvaluator
{
/// <summary>Critiques and scores <paramref name="output"/>, indicating whether it passes.</summary>
Task<Feedback> CheckAsync(string output);
}

Step 2: Implement the iterative refinement loop
/// <summary>
/// Generate → evaluate → refine loop: asks the optimizer for a draft, has the
/// evaluator critique it, and feeds the critique back to the optimizer until
/// the evaluator passes the output or the iteration budget is spent.
/// </summary>
public static class RefinementLoop
{
// Hard cap so a never-passing evaluator cannot loop forever.
private const int MaxIterations = 5;
/// <summary>
/// Runs the refinement loop for <paramref name="task"/>. Returns the first
/// output the evaluator passes, or the last refinement once the budget is
/// exhausted (note: that final refinement is returned unevaluated).
/// </summary>
public static async Task<string> RunAsync(
string task, IOptimizer optimizer, IEvaluator evaluator)
{
var currentOutput = await optimizer.GenerateAsync(task);
for (var i = 0; i < MaxIterations; i++)
{
var feedback = await evaluator.CheckAsync(currentOutput);
if (feedback.IsPass)
return currentOutput;
// Failed this round: hand the critique back for another pass.
currentOutput = await optimizer.RefineAsync(
currentOutput, feedback.Critique);
}
// Iteration budget exhausted; best-effort result.
return currentOutput;
}
}

using System.Text.Json;
using Microsoft.Extensions.Logging;
// [step] Define strongly-typed feedback and configuration records
/// <summary>
/// Structured evaluator verdict: pass flag, actionable critique, an overall
/// score, and per-category scores (e.g. "accuracy", "completeness").
/// </summary>
/// <remarks>
/// NOTE(review): the <c>Dictionary</c> member makes record equality compare
/// <c>Categories</c> by reference, not contents — compare members explicitly
/// if value equality of feedback instances ever matters.
/// </remarks>
public record Feedback(
bool IsPass,
string Critique,
double Score,
Dictionary<string, double> Categories);
/// <summary>
/// LLM settings for the optimizer agent: model id, sampling temperature,
/// and maximum response tokens.
/// </summary>
public record OptimizerConfig(
string Model, double Temperature, int MaxTokens);
/// <summary>
/// Evaluator settings: model id, the grading rubric embedded into the
/// evaluation prompt, and the score threshold for a pass.
/// </summary>
/// <remarks>
/// NOTE(review): <c>PassThreshold</c> is not consulted anywhere in the visible
/// <c>EvaluatorAgent</c> code — confirm the real LLM integration applies it.
/// </remarks>
public record EvaluatorConfig(
string Model, string Rubric, double PassThreshold);
/// <summary>
/// Loop tuning: iteration cap, an optional per-iteration callback (invoked
/// with the 1-based iteration index and that round's feedback), and a token
/// for cooperative cancellation.
/// </summary>
public record RefinementOptions(
int MaxIterations = 5,
Action<int, Feedback>? OnIteration = null,
CancellationToken CancellationToken = default);
/// <summary>
/// Final loop outcome: the output text, the number of evaluation rounds
/// consumed, and the last score the evaluator assigned.
/// </summary>
public record RefinementResult(
string Output, int Iterations, double FinalScore);
// [step] Optimizer agent with LLM integration
/// <summary>
/// Draft-producing side of the evaluator-optimizer loop: creates an initial
/// output for a task and revises it in response to evaluator critiques.
/// </summary>
public sealed class OptimizerAgent
{
    // Retained for the real LLM call; the placeholder below does not read it.
    private readonly OptimizerConfig _config;
    private readonly ILogger<OptimizerAgent> _logger;

    public OptimizerAgent(OptimizerConfig config, ILogger<OptimizerAgent> logger)
    {
        _config = config;
        _logger = logger;
    }

    /// <summary>Produces the first draft for <paramref name="task"/>.</summary>
    public async Task<string> GenerateAsync(
        string task, CancellationToken ct = default)
    {
        _logger.LogInformation("Generating initial output for task");
        return await CallLlmAsync($"Generate: {task}", ct);
    }

    /// <summary>Revises <paramref name="current"/> according to the evaluator's <paramref name="critique"/>.</summary>
    public async Task<string> RefineAsync(
        string current, string critique, CancellationToken ct = default)
    {
        _logger.LogInformation("Refining output based on feedback");
        var refinePrompt =
            $"Improve the following output based on feedback.\nCurrent output: {current}\nFeedback: {critique}\nProvide the improved version:";
        return await CallLlmAsync(refinePrompt, ct);
    }

    // Stand-in for the real LLM client; echoes a truncated preview of the prompt.
    private Task<string> CallLlmAsync(string prompt, CancellationToken ct)
    {
        ct.ThrowIfCancellationRequested();
        // Replace with actual LLM API call
        var preview = prompt.Length <= 50 ? prompt : prompt[..50];
        return Task.FromResult($"[LLM Response for: {preview}...]");
    }
}
// [step] Evaluator agent with rubric-based checking
/// <summary>
/// Grading side of the evaluator-optimizer loop: prompts an LLM to judge an
/// output against the configured rubric and parses the JSON verdict into a
/// <see cref="Feedback"/>.
/// </summary>
public sealed class EvaluatorAgent(
    EvaluatorConfig config, ILogger<EvaluatorAgent> logger)
{
    // The prompt asks the model for camelCase keys (isPass, critique, ...) while
    // Feedback's positional-record members are PascalCase; without
    // case-insensitive binding every member would silently fall back to its
    // default. Cached once per CA1869 (JsonSerializerOptions is expensive to
    // create per call).
    private static readonly JsonSerializerOptions s_jsonOptions = new()
    {
        PropertyNameCaseInsensitive = true,
    };

    /// <summary>
    /// Evaluates <paramref name="output"/> against <c>config.Rubric</c>.
    /// </summary>
    /// <exception cref="InvalidOperationException">The LLM reply did not deserialize to a <see cref="Feedback"/>.</exception>
    public async Task<Feedback> CheckAsync(
        string output, CancellationToken ct = default)
    {
        logger.LogInformation("Evaluating output against rubric");
        // BUG FIX: the original literal was not interpolated, so "{{ ... }}"
        // reached the model with doubled braces; brace escaping only applies
        // inside interpolated strings.
        var prompt = $"Evaluate against rubric: {config.Rubric}\n" +
            $"Output: {output}\n" +
            "Respond with JSON: { isPass, critique, score, categories }";
        var raw = await CallLlmAsync(prompt, ct);
        return JsonSerializer.Deserialize<Feedback>(raw, s_jsonOptions)
            ?? throw new InvalidOperationException("Failed to parse feedback");
    }

    // Stand-in for the real LLM client; returns a canned passing verdict.
    private Task<string> CallLlmAsync(string prompt, CancellationToken ct)
    {
        ct.ThrowIfCancellationRequested();
        return Task.FromResult(JsonSerializer.Serialize(new Feedback(
            true, "Meets all criteria", 95,
            new Dictionary<string, double>
            {
                ["accuracy"] = 96, ["completeness"] = 94
            })));
    }
}
// [step] Production refinement loop with cancellation and telemetry
/// <summary>
/// Production refinement loop: generate → evaluate → refine with an iteration
/// budget, cooperative cancellation, and an optional per-iteration telemetry
/// callback.
/// </summary>
public static class RefinementLoop
{
/// <summary>
/// Runs the loop and reports the final output, the number of evaluation
/// rounds used, and the last score the evaluator assigned.
/// </summary>
public static async Task<RefinementResult> RunAsync(
string task,
OptimizerAgent optimizer,
EvaluatorAgent evaluator,
RefinementOptions options)
{
var ct = options.CancellationToken;
var currentOutput = await optimizer.GenerateAsync(task, ct);
for (var i = 0; i < options.MaxIterations; i++)
{
ct.ThrowIfCancellationRequested();
var feedback = await evaluator.CheckAsync(currentOutput, ct);
// Telemetry hook: 1-based iteration index plus this round's verdict.
options.OnIteration?.Invoke(i + 1, feedback);
if (feedback.IsPass)
return new RefinementResult(currentOutput, i + 1, feedback.Score);
currentOutput = await optimizer.RefineAsync(
currentOutput, feedback.Critique, ct);
}
// Budget exhausted: one extra evaluation so FinalScore describes the text
// actually returned. NOTE(review): `final_` breaks C# camelCase local
// naming; prefer `finalFeedback`.
var final_ = await evaluator.CheckAsync(currentOutput, ct);
return new RefinementResult(
currentOutput, options.MaxIterations, final_.Score);
}
}

Evaluator-Optimizer Agent Pattern Architecture
[Architecture diagram placeholder — the diagram did not render in this capture.]
Tip: Evaluator-Optimizer Agent Pattern in the Real World
“Think of a student writing an essay (Optimizer) and a teacher grading it with detailed feedback (Evaluator). The student revises based on the red-ink comments and resubmits. This cycle repeats until the essay meets the teacher’s standards—or the deadline (max iterations) is reached.”