from osmosis_ai import osmosis_reward, osmosis_rubric, evaluate_rubric
from dotenv import load_dotenv

load_dotenv()
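# Assumption (not shown in the snippet): the .env file loaded above supplies the
# judge model's credentials, e.g. OPENAI_API_KEY for the "openai" provider used below.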


# Local reward function
@osmosis_reward
def exact_match(solution_str: str, ground_truth: str, extra_info: dict = None, **kwargs) -> float:
    return 1.0 if solution_str.strip() == ground_truth.strip() else 0.0
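

# A second local reward, as a sketch only: a hypothetical partial-credit scorer
# (not from the library docs) that assumes @osmosis_reward accepts any function
# with the same signature as exact_match above.
@osmosis_reward
def token_overlap(solution_str: str, ground_truth: str, extra_info: dict = None, **kwargs) -> float:
    # Fraction of ground-truth tokens that also appear in the solution.
    truth_tokens = set(ground_truth.lower().split())
    if not truth_tokens:
        return 0.0
    return len(truth_tokens & set(solution_str.lower().split())) / len(truth_tokens)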


# Remote rubric evaluator
@osmosis_rubric
def semantic_eval(solution_str: str, ground_truth: str | None, extra_info: dict, **kwargs) -> float:
    return evaluate_rubric(
        rubric="Compare semantic similarity (0-1 scale)",
        solution_str=solution_str,
        ground_truth=ground_truth,
        model_info={"provider": "openai", "model": "gpt-5"},
    )
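

# Composition sketch (illustrative, not a library feature): blend the cheap
# local check with the remote judge. The 0.3/0.7 weights are arbitrary.
def blended_score(solution_str: str, ground_truth: str) -> float:
    local = exact_match(solution_str, ground_truth)
    remote = semantic_eval(solution_str, ground_truth, {})
    return 0.3 * local + 0.7 * remote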


# Usage
solution = "The capital of France is Paris"
truth = "Paris is France's capital"

local_score = exact_match(solution, truth)
semantic_score = semantic_eval(solution, truth, {})

print(f"Exact match: {local_score}")   # 0.0: the strings differ verbatim
print(f"Semantic: {semantic_score}")   # ~1.0: same meaning per the judge model