From f069a059a08c72fed96f5224b6a220b5bf8ee322 Mon Sep 17 00:00:00 2001
From: Quentin Anthony
Date: Tue, 10 Dec 2024 10:33:42 -0800
Subject: [PATCH] update header and subsection title

---
 blogs/ecosystems-and-partners/zyphra/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/blogs/ecosystems-and-partners/zyphra/README.md b/blogs/ecosystems-and-partners/zyphra/README.md
index ed8538f..3bb150b 100644
--- a/blogs/ecosystems-and-partners/zyphra/README.md
+++ b/blogs/ecosystems-and-partners/zyphra/README.md
@@ -9,7 +9,7 @@ category: Ecosystems and Partners
 language: English
 myst:
   html_meta:
-    "description lang=en": "In this blog, we demonstrate the first backwards kernels to surpass H100s for both transformers (Flash Attention v2) and hybrid models (Mamba2), which enables training foundation models on AMD Instinct MI300X accelerators."
+    "description lang=en": "This blog shows Zyphra's new training kernels for transformers and hybrid models on AMD Instinct MI300X accelerators, surpassing H100 performance."
     "keywords": "Mamba, PyTorch, S4, S6, Mamba2, Transformer, Flash Attention, Optimization, Hardware-aware, Transformer, Attention, ROCm, Mi210, MI250, MI300, AI/ML, Generative AI"
     "property=og:locale": "en_US"
 ---
@@ -17,7 +17,7 @@ myst:
 
 # Training Transformers and Hybrid models on AMD Instinct MI300X Accelerators
 
-## Introduction
+## Harnessing the MI300 Hardware Specs
 
 This blog is contributed by [Zyphra](https://www.zyphra.com/): a Palo Alto-based AI research lab and AMD Instinct Partner.