Add workflow for ttnn md files generation (#163)
Add workflow for ttnn md files generation to nightly tests. Currently, the
created files are not pushed to docs/ops/ttnn. As a next iteration, these
files could be published on Pages. Removed the "Runs on TTNN" column from the
created files.
ddilbazTT authored Jan 9, 2025
1 parent 42ed4f8 commit 8e336dc
Showing 3 changed files with 78 additions and 20 deletions.
58 changes: 58 additions & 0 deletions .github/workflows/generate-ttnn-md.yml
@@ -0,0 +1,58 @@
name: Generate TTNN MD Files

on:
  workflow_call:
  workflow_dispatch:
    inputs:
      run_id:
        description: 'Build pipeline run ID to download artifacts from'
        required: true
        type: string

jobs:
  generate_md:
    timeout-minutes: 120
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v4

    - name: Setpyenv
      uses: actions/setup-python@v2
      with:
        python-version: 3.11

    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        python -m pip install -r requirements.txt
    - name: Set reusable strings
      id: strings
      shell: bash
      run: |
        echo "work-dir=$(pwd)" >> "$GITHUB_OUTPUT"
        echo "install-output-dir=$(pwd)/install" >> "$GITHUB_OUTPUT"
        echo "test-output-dir=$(pwd)/results/models/tests/" >> "$GITHUB_OUTPUT"
    - name: Git safe dir
      run: git config --global --add safe.directory ${{ steps.strings.outputs.work-dir }}

    - name: Download artifacts
      uses: actions/download-artifact@v4
      with:
        name: models_op_per_op.xlsx
        path: ${{ steps.strings.outputs.work-dir }}/results

    - name: Generate TTNN MD Files
      shell: bash
      run: |
        source env/activate
        echo "${{ steps.strings.outputs.work-dir }}/tt_torch/tools/generate_md.py"
        echo "${{ steps.strings.outputs.work-dir }}/results/models_op_per_op.xlsx"
        python ${{ steps.strings.outputs.work-dir }}/tt_torch/tools/generate_md.py --excel_path ${{ steps.strings.outputs.work-dir }}/results/models_op_per_op.xlsx --md_dir ${{ steps.strings.outputs.work-dir }}/docs/ops/ttnn --json_dir ${{ steps.strings.outputs.work-dir }}/docs/ops/ttnn
    - name: Upload TTNN MD Files to archive
      uses: actions/upload-artifact@v4
      with:
        name: ttnn-md
        path: ${{ steps.strings.outputs.work-dir }}/docs/ops/ttnn
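For local debugging, the same generation step can be reproduced outside CI. A minimal sketch, assuming the repository environment is already set up and that models_op_per_op.xlsx has been downloaded by hand into results/; the paths and the use of subprocess are illustrative, mirroring the flags in the "Generate TTNN MD Files" step above:

# Minimal local reproduction of the "Generate TTNN MD Files" workflow step.
# Assumes the repo root is the current directory and results/models_op_per_op.xlsx
# was fetched manually from a build pipeline run.
import subprocess
from pathlib import Path

repo = Path.cwd()
out_dir = repo / "docs" / "ops" / "ttnn"
out_dir.mkdir(parents=True, exist_ok=True)

subprocess.run(
    [
        "python", str(repo / "tt_torch" / "tools" / "generate_md.py"),
        "--excel_path", str(repo / "results" / "models_op_per_op.xlsx"),
        "--md_dir", str(out_dir),
        "--json_dir", str(out_dir),
    ],
    check=True,  # fail loudly; the script itself catches and prints exceptions
)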
4 changes: 4 additions & 0 deletions .github/workflows/nightly-tests.yml
@@ -27,3 +27,7 @@ jobs:
     needs: test
     uses: ./.github/workflows/generate-model-report.yml
     secrets: inherit
+  generate-ttnn-md:
+    needs: download-report
+    uses: ./.github/workflows/generate-ttnn-md.yml
+    secrets: inherit
36 changes: 16 additions & 20 deletions tt_torch/tools/generate_md.py
@@ -107,10 +107,9 @@ def parse_json(self, json_path):
         # if yes, this needs to be revised
         # this indexing hasn't been tested with many json files
         ttnn_mlir = ajs["programs"][0]["debug_info"]["mlir"]["source"]
-        status = "N/A"
         pcc = "N/A"
         atol = "N/A"
-        self.process_ops(ttnn_mlir, status, pcc, atol)
+        self.process_ops(ttnn_mlir, pcc, atol)

     def parse_xlsx(self, excel_path):
         """
@@ -123,7 +122,7 @@ def parse_xlsx(self, excel_path):
         df = pd.read_excel(excel_path, sheet_name="All Ops")

         # Validate required columns are present
-        required_columns = ["Raw TTNNIR", "Torch Name", "Status", "PCC", "ATOL"]
+        required_columns = ["Raw TTNNIR", "Torch Name", "PCC", "ATOL"]
         missing_columns = [col for col in required_columns if col not in df.columns]

         if missing_columns:
@@ -138,20 +137,18 @@ def parse_xlsx(self, excel_path):
             raw_ttnnir = row["Raw TTNNIR"].strip("'\"")

             # Extract row details
-            status = row["Status"]
             pcc = row["PCC"]
             atol = row["ATOL"]

             # Process operation details
-            self.process_ops(raw_ttnnir, status, pcc, atol)
+            self.process_ops(raw_ttnnir, pcc, atol)

-    def process_ops(self, ttnnir_string, status, pcc, atol):
+    def process_ops(self, ttnnir_string, pcc, atol):
         """
         Process TTNN operations from an IR string, extracting shapes, layouts, and metadata.
         Args:
             ttnnir_string: TTNN Intermediate Representation string
-            status: Operation status code
             pcc: Percent Correct Classification metric
             atol: Absolute tolerance for numerical comparisons
         """
@@ -220,12 +217,6 @@ def process_ops(self, ttnnir_string, status, pcc, atol):
                 output_layouts.append(layout)
             opToWrite["input_layouts"] = input_layouts
             opToWrite["output_layouts"] = output_layouts
-            if status == 6.0:
-                opToWrite["runs_on_ttnn"] = "no"
-            elif status == 7.0:
-                opToWrite["runs_on_ttnn"] = "yes"
-            else:
-                opToWrite["runs_on_ttnn"] = "N/A"
             opToWrite["pcc"] = pcc
             opToWrite["atol"] = atol
             if self.ops.get(opToWrite["name"]) is None:
@@ -257,15 +248,14 @@ def create_md_files(self, output_dir):
             if dict_list:
                 # Write the table header
                 file.write(
-                    "| Name | Input Shapes | Input Layouts | Attributes | Output Shapes | Output Layouts | Runs on TTNN | PCC | ATOL |\n"
+                    "| Name | Input Shapes | Input Layouts | Attributes | Output Shapes | Output Layouts | PCC | ATOL |\n"
                 )
                 file.write(
-                    "|------|--------------|---------------|------------|---------------|----------------|--------------|-----|------|\n"
+                    "|------|--------------|---------------|------------|---------------|----------------|-----|------|\n"
                 )
                 # Write each dictionary in the array to the table
                 for item in dict_list:
                     name = item.get("name", "")
-                    runs_on_ttnn = item.get("runs_on_ttnn", "")
                     pcc = item.get("pcc", "")
                     atol = item.get("atol", "")

@@ -295,7 +285,7 @@ def create_md_files(self, output_dir):
                     output_shapes = " <br> ".join(item.get("output_shapes", []))

                     file.write(
-                        f"| {name} | {input_shapes} | {input_layouts_str} | {attributes} | {output_shapes} | {output_layouts_str} | {runs_on_ttnn} | {pcc} | {atol} |\n"
+                        f"| {name} | {input_shapes} | {input_layouts_str} | {attributes} | {output_shapes} | {output_layouts_str} | {pcc} | {atol} |\n"
                     )

     def create_json_data(self):
Expand All @@ -308,6 +298,11 @@ def create_json_data(self):

for item in dict_list:
# Process input layouts
pcc = "N/A" if pd.isna(item.get("pcc")) else str(item.get("pcc", "N/A"))
atol = (
"N/A" if pd.isna(item.get("atol")) else str(item.get("atol", "N/A"))
)

input_layouts = item.get("input_layouts", [])
processed_input_layouts = [
{
Expand Down Expand Up @@ -337,9 +332,8 @@ def create_json_data(self):
"attributes": item.get("attributes", {}),
"output_shapes": item.get("output_shapes", []),
"output_layouts": processed_output_layouts,
"runs_on_ttnn": item.get("runs_on_ttnn", ""),
"pcc": item.get("pcc", ""),
"atol": item.get("atol", ""),
"pcc": pcc,
"atol": atol,
}

processed_items.append(processed_item)
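A plausible motivation for the pd.isna guard introduced in the two hunks above: pandas reads empty PCC/ATOL cells from the spreadsheet as float NaN, and Python's json module would emit the non-standard token NaN for such values. A standalone sketch of the pattern (not the project's code):

import json
import pandas as pd

raw = {"pcc": float("nan"), "atol": 0.01}

# Without a guard, json.dumps(raw) yields '{"pcc": NaN, "atol": 0.01}',
# which many JSON parsers reject.
cleaned = {k: ("N/A" if pd.isna(v) else str(v)) for k, v in raw.items()}
print(json.dumps(cleaned))  # {"pcc": "N/A", "atol": "0.01"}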
Expand Down Expand Up @@ -461,11 +455,13 @@ def save_json_files(self, output_dir):
if args.md_dir is not None:
try:
myOps.create_md_files(args.md_dir)
print(f"Successfully generated files in {args.md_dir}")
except Exception as e:
print(f"Exception occured at generate_md.py: {e}")

if args.json_dir is not None:
try:
myOps.save_json_files(args.json_dir)
print(f"Successfully generated files in {args.json_dir}")
except Exception as e:
print(f"Exception occured at generate_md.py: {e}")
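After a local run, a quick sanity check of the generated docs/ops/ttnn output can look like this; a hedged sketch, since the exact file names and JSON layout depend on generate_md.py internals not shown in this diff:

import json
from pathlib import Path

out_dir = Path("docs/ops/ttnn")  # the directory passed as --md_dir and --json_dir above

md_files = sorted(out_dir.glob("*.md"))
json_files = sorted(out_dir.glob("*.json"))
print(f"{len(md_files)} markdown files, {len(json_files)} json files")

for jf in json_files:
    data = json.loads(jf.read_text())
    # Per create_json_data above, op entries now carry string "pcc"/"atol"
    # values ("N/A" or a numeric string) and no "runs_on_ttnn" key.
    print(jf.name, "loaded", type(data).__name__)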
