Base for word report i18n and split completion from maturity (#1385)
* Base for word report i18n

* Add completion bar chart

* Add score calculation

* fix max scale and update templates
ab-smith authored Jan 20, 2025
1 parent 4a0463e commit 9380fa7
Showing 5 changed files with 262 additions and 9 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/backend-linters.yaml
@@ -31,7 +31,7 @@ jobs:
- name: Install ruff
working-directory: ${{env.working-directory}}
run: |
python -m pip install ruff==0.9.1
python -m pip install ruff==0.9.2
- name: Run ruff format check
working-directory: ${{env.working-directory}}
run: ruff format --check .
257 changes: 251 additions & 6 deletions backend/core/generators.py
@@ -8,6 +8,7 @@
import matplotlib.pyplot as plt
import numpy as np

from django.utils.translation import gettext_lazy as _
# from icecream import ic

from django.db.models import Count
@@ -110,6 +111,140 @@ def plot_donut(data, colors=None):
return chart_buffer


def plot_completion_bar(data, colors=None, title=None):
"""
Create a vertical bar chart showing completion percentage per category
Args:
data (list): List of dictionaries with 'category' and 'value' keys
colors (list, optional): Custom color palette
title (str, optional): Chart title
Returns:
io.BytesIO: Buffer containing the bar chart image
"""
plt.close("all")

categories = [item["category"] for item in data]
values = [item["value"] for item in data]

default_colors = [
"#2196F3", # Blue
"#4CAF50", # Green
"#FFC107", # Amber
"#F44336", # Red
"#9C27B0", # Purple
]

plt.figure(figsize=(12, 6))
ax = plt.gca()

plot_colors = colors if colors is not None else default_colors[: len(categories)]
bars = plt.bar(categories, values, color=plot_colors)

# Add value labels on top of each bar
for bar in bars:
height = bar.get_height()
plt.text(
bar.get_x() + bar.get_width() / 2,
height,
f"{int(height)}%",
ha="center",
va="bottom",
)

# Customize the chart
plt.ylim(0, 100) # Set y-axis from 0 to 100 for percentages
plt.ylabel("Completion (%)")

# Rotate x-axis labels for better readability if needed
plt.xticks(rotation=45, ha="right")

if title:
plt.title(title)

plt.tight_layout()

# Save to buffer
chart_buffer = io.BytesIO()
plt.savefig(chart_buffer, format="png", dpi=300, bbox_inches="tight")
chart_buffer.seek(0)
plt.close()

return chart_buffer
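# Usage sketch for plot_completion_bar (hypothetical category names and
# percentages; the real call site is in gen_audit_context further below,
# which reuses the data prepared for the spider chart):
_demo_completion = [
    {"category": "Govern", "value": 80},
    {"category": "Protect", "value": 55},
    {"category": "Detect", "value": 30},
]
_demo_buffer = plot_completion_bar(_demo_completion, title="Completion per category")
# _demo_buffer is an io.BytesIO holding a 300-dpi PNG, ready to be wrapped in
# a docxtpl InlineImage or written to disk with open(...).write(_demo_buffer.getvalue())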


def plot_category_radar(category_scores, max_score=100, colors=None, title=None):
"""
Create a radar/spider chart showing scores per category
Args:
category_scores (dict): Dictionary containing category scores from aggregate_category_scores()
max_score (float): Maximum possible score value (default: 100)
colors (list, optional): Custom color palette
title (str, optional): Chart title
Returns:
io.BytesIO: Buffer containing the radar chart image
"""
plt.close("all")

# Extract data
categories = [data["name"] for data in category_scores.values()]
scores = [data["average_score"] for data in category_scores.values()]

# Number of categories
N = len(categories)

default_colors = [
"#2196F3", # Blue
"#4CAF50", # Green
"#FFC107", # Amber
"#F44336", # Red
"#9C27B0", # Purple
]

# Compute angle for each axis
angles = [n / float(N) * 2 * np.pi for n in range(N)]

# Close the plot by appending the first value and angle
values = scores + scores[:1]
angles = angles + [angles[0]]

# Create the plot
plt.figure(figsize=(12, 12))
ax = plt.subplot(111, polar=True)

plot_colors = colors if colors is not None else default_colors[: len(categories)]

# Plot the scores
ax.plot(angles, values, "o-", linewidth=2, color=plot_colors[0])
ax.fill(angles, values, alpha=0.25, color=plot_colors[0])

# Fix axis to go in the right order and start at 12 o'clock
ax.set_theta_offset(np.pi / 2)
ax.set_theta_direction(-1)

# Draw axis lines for each angle and label
plt.xticks(angles[:-1], categories)

# Set y-axis limits based on provided max_score with 10% padding
ax.set_ylim(0, max_score * 1.1)

if title:
plt.title(title)

plt.tight_layout()

# Save to buffer
chart_buffer = io.BytesIO()
plt.savefig(chart_buffer, format="png", dpi=300, bbox_inches="tight")
chart_buffer.seek(0)
plt.close()

return chart_buffer
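# Usage sketch for plot_category_radar (hypothetical URNs and scores; the
# dict shape mirrors what aggregate_category_scores in gen_audit_context
# produces further below):
_demo_scores = {
    "urn:example:cat-1": {"name": "Governance", "average_score": 72.5},
    "urn:example:cat-2": {"name": "Protection", "average_score": 41.0},
    "urn:example:cat-3": {"name": "Resilience", "average_score": 58.3},
}
_demo_radar = plot_category_radar(_demo_scores, max_score=100, title="Scores per category")
# Only "name" and "average_score" are read here; the other keys returned by
# aggregate_category_scores (total_score, item_count, scored_count) are ignored.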


def plot_spider_chart(data, colors=None, title=None):
"""
Create a spider/radar chart from the input data
@@ -172,7 +307,7 @@ def plot_spider_chart(data, colors=None, title=None):
return chart_buffer


def gen_audit_context(id, doc, tree):
def gen_audit_context(id, doc, tree, lang):
def count_category_results(data):
def recursive_result_count(node_data):
# Initialize result counts for this node
@@ -205,6 +340,68 @@ def recursive_result_count(node_data):

return category_result_counts

def aggregate_category_scores(data):
"""
Aggregate scores per category from the tree structure, using existing score values.
Each scoreable item has a standard max_score of 100.
Args:
data (dict): Tree structure containing assessment data with score values
Returns:
dict: Dictionary with category URNs as keys and score information as values
"""

def recursive_score_calculation(node_data):
# Initialize score tracking for this node
scores = {
"total_score": 0, # Sum of all scores
"item_count": 0, # Number of scoreable items
"scored_count": 0, # Number of items that have been scored
}

# Check if the current node is scoreable
if node_data.get("is_scored", False):
scores["item_count"] = 1

if node_data.get("score") is not None:
scores["total_score"] = node_data["score"]
scores["scored_count"] = 1

# Recursively process children
for child_id, child_data in node_data.get("children", {}).items():
child_scores = recursive_score_calculation(child_data)

# Aggregate child scores
scores["total_score"] += child_scores["total_score"]
scores["item_count"] += child_scores["item_count"]
scores["scored_count"] += child_scores["scored_count"]

return scores

# Dictionary to store category scores
category_scores = {}

# Process only top-level nodes (categories)
for node_id, node_data in data.items():
if node_data.get("parent_urn") is None:
scores = recursive_score_calculation(node_data)

# Calculate average score for the category
average_score = 0
if scores["scored_count"] > 0:
average_score = scores["total_score"] / scores["scored_count"]

category_scores[node_data["urn"]] = {
"name": node_data["node_content"],
"total_score": scores["total_score"],
"item_count": scores["item_count"],
"scored_count": scores["scored_count"],
"average_score": round(average_score, 1),
}

return category_scores
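# Worked example (hypothetical tree; URN and labels made up). A category with
# one scored child (score 80) and one scoreable-but-unscored child:
#
#     {
#         "n1": {
#             "urn": "urn:example:cat-1", "node_content": "Governance",
#             "parent_urn": None,
#             "children": {
#                 "n2": {"is_scored": True, "score": 80, "children": {}},
#                 "n3": {"is_scored": True, "score": None, "children": {}},
#             },
#         },
#     }
#
# yields {"name": "Governance", "total_score": 80, "item_count": 2,
# "scored_count": 1, "average_score": 80.0} under "urn:example:cat-1".
# average_score divides by scored_count, so unscored items do not pull the
# category average down; they are only reflected in item_count.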

context = dict()
audit = ComplianceAssessment.objects.get(id=id)

@@ -216,6 +413,15 @@ def recursive_result_count(node_data):

agg_drifts = list()

# Calculate category scores
category_scores = aggregate_category_scores(tree)
max_score = 100 # default
for node in tree.values():
if node.get("max_score") is not None:
max_score = node["max_score"]
break
print(category_scores)

for key, content in tree.items():
total = sum(result_counts[content["urn"]].values())
ok_items = result_counts[content["urn"]].get("compliant", 0) + result_counts[
@@ -247,15 +453,42 @@ def recursive_result_count(node_data):

aggregated["total"] = total

# temporary hack since the gettext_lazy wasn't consistent
i18n_dict = {
"en": {
"compliant": "Compliant",
"partially_compliant": "Partially compliant",
"non_compliant": "Non compliant",
"not_applicable": "Not applicable",
"not_assessed": "Not assessed",
},
"fr": {
"compliant": "Conformes",
"partially_compliant": "Partiellement conformes",
"non_compliant": "Non conformes",
"not_applicable": "Non applicables",
"not_assessed": "Non évalués",
},
}

donut_data = [
{"category": "Conforme", "value": aggregated["compliant"]},
{"category": i18n_dict[lang]["compliant"], "value": aggregated["compliant"]},
{
"category": "Partiellement conforme",
"category": i18n_dict[lang]["partially_compliant"],
"value": aggregated["partially_compliant"],
},
{"category": "Non conforme", "value": aggregated["non_compliant"]},
{"category": "Non applicable", "value": aggregated["not_applicable"]},
{"category": "Non évalué", "value": aggregated["not_assessed"]},
{
"category": i18n_dict[lang]["non_compliant"],
"value": aggregated["non_compliant"],
},
{
"category": i18n_dict[lang]["not_applicable"],
"value": aggregated["not_applicable"],
},
{
"category": i18n_dict[lang]["not_assessed"],
"value": aggregated["not_assessed"],
},
]

custom_colors = ["#2196F3"]
@@ -264,6 +497,11 @@ def recursive_result_count(node_data):
colors=custom_colors,
)

category_radar_buffer = plot_category_radar(
category_scores, max_score=max_score, colors=custom_colors
)
chart_category_radar = InlineImage(doc, category_radar_buffer, width=Cm(15))

requirement_assessments_objects = audit.get_requirement_assessments(
include_non_assessable=True
)
@@ -302,6 +540,10 @@ def recursive_result_count(node_data):
]
hbar_buffer = plot_horizontal_bar(ac_chart_data, colors=custom_colors)

completion_bar_buffer = plot_completion_bar(spider_data, colors=custom_colors)

chart_completion = InlineImage(doc, completion_bar_buffer, width=Cm(15))

res_donut = InlineImage(doc, plot_donut(donut_data), width=Cm(15))
chart_spider = InlineImage(doc, spider_chart_buffer, width=Cm(15))
ac_chart = InlineImage(doc, hbar_buffer, width=Cm(15))
@@ -312,12 +554,15 @@ def recursive_result_count(node_data):
"contributors": f"{authors}\n{reviewers}",
"req": aggregated,
"compliance_donut": res_donut,
"completion_bar": chart_completion,
"compliance_radar": chart_spider,
"drifts_per_domain": agg_drifts,
"chart_controls": ac_chart,
"p1_controls": p1_controls,
"ac_count": ac_total,
"igs": IGs,
"category_scores": category_scores,
"category_radar": chart_category_radar,
}

return context
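The new charts reach the Word document through the completion_bar, category_radar, and category_scores context keys. The report templates themselves are the two binary .docx files below, so the Jinja tags inside them are not visible in this diff; assuming the tags match the context keys, the rendering path looks roughly like this minimal sketch (paths and data shortened):

from docx.shared import Cm
from docxtpl import DocxTemplate, InlineImage

doc = DocxTemplate("audit_report_template_en.docx")  # shipped as a binary template
png = plot_completion_bar([{"category": "Govern", "value": 75}])
doc.render({"completion_bar": InlineImage(doc, png, width=Cm(15))})
doc.save("report.docx")  # the view saves to an io.BytesIO instead of a file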
Binary file not shown.
Binary file not shown.
12 changes: 10 additions & 2 deletions backend/core/views.py
@@ -3142,12 +3142,20 @@ def compliance_assessment_csv(self, request, pk):

@action(detail=True, methods=["get"])
def word_report(self, request, pk):
"""
Word report generation (Exec)
"""
lang = "en"
if request.user.preferences.get("lang") is not None:
lang = request.user.preferences.get("lang")
if lang not in ["fr", "en"]:
lang = "en"
template_path = (
Path(settings.BASE_DIR)
/ "core"
/ "templates"
/ "core"
/ "audit_report_template.docx"
/ f"audit_report_template_{lang}.docx"
)
doc = DocxTemplate(template_path)
_framework = self.get_object().framework
@@ -3160,7 +3168,7 @@ def word_report(self, request, pk):
)
implementation_groups = self.get_object().selected_implementation_groups
filter_graph_by_implementation_groups(tree, implementation_groups)
context = gen_audit_context(pk, doc, tree)
context = gen_audit_context(pk, doc, tree, lang)
doc.render(context)
buffer_doc = io.BytesIO()
doc.save(buffer_doc)
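The language resolution in word_report above is a whitelist with an English fallback. Isolated as a plain function (hypothetical helper name, not part of the codebase), the behaviour is:

def resolve_report_lang(preferences: dict) -> str:
    lang = "en"
    if preferences.get("lang") is not None:
        lang = preferences.get("lang")
    if lang not in ["fr", "en"]:
        lang = "en"
    return lang

assert resolve_report_lang({"lang": "fr"}) == "fr"
assert resolve_report_lang({"lang": "de"}) == "en"  # unsupported locale falls back
assert resolve_report_lang({}) == "en"              # no preference falls back

Supporting a new language therefore means extending this whitelist, shipping an audit_report_template_<lang>.docx template, and adding the matching labels to i18n_dict in gen_audit_context.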
