{% extends 'base.html' %}
{% load static %}
{% block content %}
  {# DeepEval dashboard: top-level stats, then per-dataset metrics and recent evaluations. #}
  <h1>DeepEval Dashboard</h1>

  <section>
    <h2>Overview</h2>
    {# Name–value stats belong in a description list, not loose text. #}
    <dl>
      <dt>Total Evaluations</dt>
      <dd>{{ total_evaluations }}</dd>
      <dt>Active Datasets</dt>
      <dd>{{ active_datasets }}</dd>
    </dl>
  </section>

  {% for dataset in datasets %}
    <section>
      <h2>{{ dataset.name }}</h2>

      <h3>Aggregate Metrics</h3>
      {% with dataset.metrics as metrics %}
        <dl>
          <dt>Answer Relevance</dt>
          <dd>{{ metrics.answer_relevance|floatformat:2 }}</dd>
          <dt>Faithfulness</dt>
          <dd>{{ metrics.faithfulness|floatformat:2 }}</dd>
          <dt>Contextual Relevancy</dt>
          <dd>{{ metrics.contextual_relevancy|floatformat:2 }}</dd>
          <dt>Hallucination Score</dt>
          <dd>{{ metrics.hallucination|floatformat:2 }}</dd>
          <dt>Toxicity Score</dt>
          <dd>{{ metrics.toxicity|floatformat:2 }}</dd>
        </dl>
      {% endwith %}

      <h3>Dataset Information</h3>
      <dl>
        <dt>Name</dt>
        <dd>{{ dataset.name }}</dd>
        <dt>Status</dt>
        <dd>{{ dataset.enabled|yesno:"Active,Inactive" }}</dd>
      </dl>

      <h3>Recent Evaluations</h3>
      {# BUG FIX: the original opened and immediately closed the eval loop
         ({% templatetag openblock %} for … {% templatetag closeblock %} {% templatetag openblock %} endfor {% templatetag closeblock %} on one line), leaving the row template outside the
         loop where `eval` is no longer in context. Header renders once in
         <thead>; one <tr> per evaluation renders inside the loop in <tbody>. #}
      <table>
        <caption>Recent evaluations for {{ dataset.name }}</caption>
        <thead>
          <tr>
            <th scope="col">Source ID</th>
            <th scope="col">User Input Query</th>
            <th scope="col">Average Evaluation Score (0-1)</th>
            <th scope="col">Evaluation Time</th>
          </tr>
        </thead>
        <tbody>
          {% for eval in dataset.recent_evaluations %}
            <tr>
              <td>{{ eval.source_id }}</td>
              <td>{{ eval.input }}</td>
              <td>{{ eval.avg_score|floatformat:2 }}</td>
              <td>{{ eval.created_at|timesince }} ago</td>
            </tr>
          {% endfor %}
        </tbody>
      </table>
    </section>
  {% endfor %}
{% endblock %}