Building Custom Judge Evaluators
Prepare Test Data
import pandas as pd

df = pd.DataFrame(
    [
        {
            'text': 'Google announces new AI chip designed to accelerate '
            'machine learning workloads.',
            'ground_truth': 'Sci/Tech',
        },
        {
            'text': 'The Lakers defeated the Celtics 112-108 in overtime, '
            'with LeBron James scoring 35 points.',
            'ground_truth': 'Sports',
        },
        {
            'text': 'Federal Reserve raises interest rates by 0.25% citing '
            'persistent inflation concerns.',
            'ground_truth': 'Business',
        },
        {
            'text': 'United Nations Security Council votes to impose new '
            'sanctions on North Korea.',
            'ground_truth': 'World',
        },
        {
            'text': 'Microsoft acquires gaming company Activision Blizzard '
            'for $69 billion.',
            'ground_truth': 'Sci/Tech',
        },
    ]
)
Create a CustomJudge
# CustomJudge, LLM_MODEL_NAME, and LLM_CREDENTIAL_NAME are assumed to be
# imported / defined in the setup step.
simple_judge = CustomJudge(
    prompt_template="""
Determine the topic of the given news summary.
Pick one of: Sports, World, Sci/Tech, Business.
News Summary:
{{ news_summary }}
""",
    output_fields={
        'topic': {'type': 'string'},
        'reasoning': {'type': 'string'},
    },
    model=LLM_MODEL_NAME,
    credential=LLM_CREDENTIAL_NAME,
)
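
Before scoring the whole set, it can help to sanity-check the judge on a single row. The call below mirrors the evaluation loop in the next step: score() returns one score per output field, and the name and label attributes are the same ones the loop relies on. Note that the key in inputs must match the {{ news_summary }} template variable.

# Quick sanity check on one example
scores = simple_judge.score(inputs={'news_summary': df.loc[0, 'text']})
for s in scores:
    print(f'{s.name}: {s.label}')  # one score per output field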
Run Evaluator
results = []
for _, row in df.iterrows():
    scores = simple_judge.score(inputs={'news_summary': row['text']})
    scores_dict = {s.name: s for s in scores}
    results.append(
        {
            'ground_truth': row['ground_truth'],
            'predicted': scores_dict['topic'].label,
            'reasoning': scores_dict['reasoning'].label,
        }
    )

results_df = pd.DataFrame(results)
accuracy = (results_df['ground_truth'] == results_df['predicted']).mean()
print(f'Accuracy: {accuracy:.0%}')

# Show misclassified examples
misclassified = results_df[results_df['ground_truth'] != results_df['predicted']]
if len(misclassified) > 0:
    print(f'\nMisclassified ({len(misclassified)}):')
    for _, row in misclassified.iterrows():
        print(f'  Expected: {row["ground_truth"]}, Predicted: {row["predicted"]}')

Accuracy: 80%

Misclassified (1):
  Expected: Sci/Tech, Predicted: Business
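
The miss is almost certainly the hardest call in the test set: the Microsoft/Activision acquisition is labeled Sci/Tech but reads like business news, and the simple prompt gives the judge no rule for tech-industry deals. The next step encodes that rule directly in the prompt.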
Improve the Prompt
improved_judge = CustomJudge(
    prompt_template="""
Determine the topic of the given news summary.
Use topic 'Sci/Tech' if the news summary is about a company or
business in the tech industry, or if the news summary is about
a scientific discovery or research, including health and medicine.
Use topic 'Sports' if the news summary is about a sports event
or athlete.
Use topic 'Business' if the news summary is about a company or
industry outside of science, technology, or sports.
Use topic 'World' if the news summary is about a global event
or issue.
News Summary:
{{ news_summary }}
""",
    output_fields={
        'topic': {
            'type': 'string',
            'choices': ['Sci/Tech', 'Sports', 'Business', 'World'],
        },
        'reasoning': {'type': 'string'},
    },
    model=LLM_MODEL_NAME,
    credential=LLM_CREDENTIAL_NAME,
)

Two things changed: the prompt now spells out an explicit rule for each category (in particular, tech-industry business news counts as Sci/Tech), and the topic field is constrained with choices so the judge can only return one of the four labels.

improved_results = []
for _, row in df.iterrows():
    scores = improved_judge.score(inputs={'news_summary': row['text']})
    scores_dict = {s.name: s for s in scores}
    improved_results.append(
        {
            'ground_truth': row['ground_truth'],
            'predicted': scores_dict['topic'].label,
        }
    )

improved_df = pd.DataFrame(improved_results)
original_accuracy = (results_df['ground_truth'] == results_df['predicted']).mean()
improved_accuracy = (improved_df['ground_truth'] == improved_df['predicted']).mean()
print(f'Simple prompt: {original_accuracy:.0%}')
print(f'Improved prompt: {improved_accuracy:.0%}')

Simple prompt: 80%
Improved prompt: 100%
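
The two evaluation loops above differ only in which judge they call; if you plan to iterate on the prompt further, the loop is easy to factor into a helper. A minimal sketch (evaluate_judge is an illustrative name, not part of the SDK):

def evaluate_judge(judge, df):
    """Score every row with the given judge and return (results_df, accuracy)."""
    rows = []
    for _, row in df.iterrows():
        scores = {s.name: s for s in judge.score(inputs={'news_summary': row['text']})}
        rows.append(
            {
                'ground_truth': row['ground_truth'],
                'predicted': scores['topic'].label,
            }
        )
    results = pd.DataFrame(rows)
    return results, (results['ground_truth'] == results['predicted']).mean()

# e.g. improved_df, improved_accuracy = evaluate_judge(improved_judge, df)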
Output Field Types

Type     Description                                      Example Use
string   Text output; may be constrained with choices    The topic and reasoning fields above
Using choices for Categorical Output
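
Constraining a string field with choices forces the judge to return one of a fixed set of labels instead of free-form text, as the improved judge's topic field does:

# The 'choices' constraint from the improved judge above
output_fields = {
    'topic': {
        'type': 'string',
        'choices': ['Sci/Tech', 'Sports', 'Business', 'World'],
    },
}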
Using description to Guide the LLM
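
The exact schema is not shown in this section, but the heading implies each output field can also carry a description that steers the LLM toward the intended content. A hypothetical sketch, assuming description sits alongside type in the field definition (check the SDK reference for the exact key):

# Assumed field schema -- the 'description' placement is illustrative.
output_fields = {
    'reasoning': {
        'type': 'string',
        'description': 'One or two sentences explaining why the topic was chosen.',
    },
}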