Skip to content

Commit

Permalink
Rename from pybuda to forge (#208)
Browse files Browse the repository at this point in the history
  • Loading branch information
chandrasekaranpradeep authored Sep 2, 2024
1 parent 3dec39a commit 117123b
Show file tree
Hide file tree
Showing 1,511 changed files with 77,511 additions and 77,511 deletions.
4 changes: 2 additions & 2 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -29,9 +29,9 @@ blobgen_cmd_log
python_api/env
python_api/*.so
python_api/test_run_on_device_out
pybuda/pybuda/*.so
forge/forge/*.so
__pycache__
pybuda/pybuda.egg-info/
forge/forge.egg-info/

*.vcd

Expand Down
14 changes: 7 additions & 7 deletions .gitlab-ci.perf.yml
Original file line number Diff line number Diff line change
@@ -1,17 +1,17 @@
include:
- .gitlab-ci.wheels.yml

# PyBuda repo, Grayskull e150
# Forge repo, Grayskull e150
- ci/gitlab-test-lists/.gitlab-ci.grayskull_e150_perf_bfp8_b_nightly.yml
- ci/gitlab-test-lists/.gitlab-ci.grayskull_e150_perf_fp16_nightly.yml
- ci/gitlab-test-lists/.gitlab-ci.grayskull_e150_perf_release_nightly.yml

# PyBuda repo, Grayskull e75
# Forge repo, Grayskull e75
- ci/gitlab-test-lists/.gitlab-ci.grayskull_e75_perf_bfp8_b_nightly.yml
- ci/gitlab-test-lists/.gitlab-ci.grayskull_e75_perf_fp16_nightly.yml
- ci/gitlab-test-lists/.gitlab-ci.grayskull_e75_perf_release_nightly.yml

# PyBuda repo, Wormhole B0
# Forge repo, Wormhole B0
- ci/gitlab-test-lists/.gitlab-ci.wormhole_b0_silicon_perf_bfp8_b_nightly.yml
- ci/gitlab-test-lists/.gitlab-ci.wormhole_b0_silicon_perf_fp16_nightly.yml
- ci/gitlab-test-lists/.gitlab-ci.wormhole_b0_silicon_perf_release_nightly.yml
Expand All @@ -27,18 +27,18 @@ include:
- ci/gitlab-test-lists/benchmarking/.gitlab-ci.grayskull_e150_perf_release_public.yml

# Disable other jobs from .gitlab-ci.wheels.yml
pybuda-gs-latest-bbe-wheel:
forge-gs-latest-bbe-wheel:
rules:
- if: ($CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_MESSAGE !~ /\[no_ci_perf/)

pybuda-wh-b0-latest-bbe-wheel:
forge-wh-b0-latest-bbe-wheel:
rules:
- if: ($CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_MESSAGE !~ /\[no_ci_perf/)

pybuda-gs-unittests:
forge-gs-unittests:
rules:
- if: ($CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_MESSAGE !~ /\[no_ci_perf/)

pybuda-wh-b0-unittests:
forge-wh-b0-unittests:
rules:
- if: ($CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_MESSAGE !~ /\[no_ci_perf/)
4 changes: 2 additions & 2 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -50,14 +50,14 @@ set(STATIC_LIB_FLAGS -fPIC)
set(SHARED_LIB_FLAGS -fPIC)

add_subdirectory(third_party)
add_subdirectory(pybuda)
add_subdirectory(forge)
add_subdirectory(docs)

### Generate stubs for ttforge
### Run `cmake --build build -- make_stubs` to generate stubs
add_custom_target(make_stubs
COMMAND pip install mypy==1.10
COMMAND stubgen -m pybuda._C -m pybuda._C.autograd -m pybuda._C.graph -m pybuda._C.torch_device -m pybuda._C.runtime -o pybuda -v
COMMAND stubgen -m forge._C -m forge._C.autograd -m forge._C.graph -m forge._C.torch_device -m forge._C.runtime -o forge -v
COMMENT "Generating stubs for ttforge"
USES_TERMINAL
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
2 changes: 1 addition & 1 deletion compile_flags.txt
Original file line number Diff line number Diff line change
Expand Up @@ -11,5 +11,5 @@
-I/usr/include/python3.8
-Igui_lib
-Ithird_party/json
-Ipybuda/csrc
-Iforge/csrc
-Ithird_party/fmt
2 changes: 2 additions & 0 deletions forge/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# csrc holds the native C++ extension; forge is the Python package directory.
# Build csrc first — presumably the package depends on the built extension (TODO confirm).
add_subdirectory(csrc)
add_subdirectory(forge)
127 changes: 127 additions & 0 deletions forge/csrc/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
# Build configuration for the ttforge native extension, installed as forge/_C.so.

# site-packages directory inside the project virtualenv.
set(PYTHON_SITE_PACKAGES_DIR ${TTFORGE_VENV_DIR}/lib/${TTFORGE_PYTHON_VERSION}/site-packages)

# Let find_package(Torch) locate the pip-installed torch CMake package.
set(CMAKE_PREFIX_PATH
    ${PYTHON_SITE_PACKAGES_DIR}/torch
    ${CMAKE_PREFIX_PATH})

find_package(Python3 COMPONENTS Development REQUIRED)
find_package(Torch REQUIRED)

# Header trees of the tt-mlir submodule (source, generated, and runtime).
set(TT_MLIR_ROOT_DIR ${CMAKE_SOURCE_DIR}/third_party/tt-mlir)
set(TTMLIR_INCLUDE_DIRS
    ${TT_MLIR_ROOT_DIR}/include
    ${TT_MLIR_ROOT_DIR}/build/include
    ${TT_MLIR_ROOT_DIR}/runtime/include)

set(TTFORGE_CSRC_INCLUDES
    ${CMAKE_CURRENT_SOURCE_DIR}
    ${CMAKE_SOURCE_DIR}
    ${CMAKE_SOURCE_DIR}/third_party
    ${CMAKE_SOURCE_DIR}/third_party/fmt/include
    ${CMAKE_SOURCE_DIR}/third_party/pybind11/include
    ${CMAKE_SOURCE_DIR}/third_party/json/single_include
    ${CMAKE_SOURCE_DIR}/third_party/pybind11_json/include
    ${CMAKE_SOURCE_DIR}/third_party/tt-mlir/build/include
    ${CMAKE_SOURCE_DIR}/third_party/tt-mlir/runtime/include
    ${CMAKE_SOURCE_DIR}/third_party/tt-mlir/include
    ${TTMLIR_TOOLCHAIN_DIR}/include
    ${Python3_INCLUDE_DIRS}
    ${TTMLIR_INCLUDE_DIRS}
)

# NOTE(review): directory-scoped include_directories leaks into every target
# added below; target_include_directories on each target would be cleaner.
include_directories(${TTFORGE_CSRC_INCLUDES})
# This is workaround for utils/assert.hpp using ##__VA_ARGS__ which is not supported by clang
include_directories(SYSTEM ${CMAKE_SOURCE_DIR})
include_directories(SYSTEM ${TORCH_INCLUDE_DIRS})

add_subdirectory(graph_lib)
add_subdirectory(autograd)
add_subdirectory(shared_utils)
add_subdirectory(backend_api)
add_subdirectory(reportify)
add_subdirectory(runtime)
add_subdirectory(tt_torch_device)

### ttforge_csrc_objs ###

# NOTE(review): file(GLOB) will miss newly added passes/*.cpp files until the
# next reconfigure; an explicit source list (or CONFIGURE_DEPENDS) is safer.
file(GLOB CPP_FILES
    "forge_bindings.cpp"
    "buda_passes.cpp"
    "passes/*.cpp"
    "lower_to_buda/common.cpp"
)

add_library(ttforge_csrc_objs OBJECT ${CPP_FILES})
target_compile_options(ttforge_csrc_objs PRIVATE ${STATIC_LIB_FLAGS} ${TTFORGE_CSRC_CFLAGS})
# Generated tt-mlir headers must exist before these sources are compiled.
add_dependencies(ttforge_csrc_objs build_tt_mlir)

### End of ttforge_csrc_objs ###

######## ttforge_csrc ########

set(TTMLIR_LIB_DIR "${CMAKE_SOURCE_DIR}/third_party/tt-mlir/build/lib/SharedLib")
set(TTRUNTIME_LIB_DIR "${CMAKE_SOURCE_DIR}/third_party/tt-mlir/build/runtime/lib")
set(METAL_LIB_DIR "${CMAKE_SOURCE_DIR}/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib")
set(TORCH_LIB_DIR "${TTFORGE_VENV_DIR}/lib/${TTFORGE_PYTHON_VERSION}/site-packages/torch/lib")

add_library(ttforge_csrc SHARED)

# Because _ttnn library doesn't have lib prefix, this is workaround to make linking work
add_library(ttnn SHARED IMPORTED)
set_property(TARGET ttnn PROPERTY IMPORTED_LOCATION "${METAL_LIB_DIR}/_ttnn.so")

target_link_libraries(ttforge_csrc PRIVATE
    graph
    autograd
    shared_utils
    backend_api
    reportify
    tt_torch_device
    runtime
    ttforge_csrc_objs

    # NOTE: ordering of the libraries will affect the linking
    LLVM
    MLIR

    TTMLIR

    xml2
    curses
    z
    m
    torch_python
    c10
    ${TTFORGE_PYTHON_VERSION}
    ${TORCH_LIBRARIES}
)

target_compile_options(ttforge_csrc PRIVATE
    ${TTFORGE_CSRC_CFLAGS}
    ${CXXFLAGS}
    ${SHARED_LIB_FLAGS}
)

target_link_directories(ttforge_csrc PRIVATE
    ${TTMLIR_TOOLCHAIN_DIR}/lib
    ${TTMLIR_LIB_DIR}
    ${TTRUNTIME_LIB_DIR}
    ${METAL_LIB_DIR}
    ${TORCH_LIB_DIR})

### End of ttforge_csrc ###

#### Copy python module extension to ttforge directory ####

# Copy the built shared library into the venv as forge/_C.so (preserving its
# timestamp via `touch -r`) and symlink it into the source tree so the module
# resolves in both layouts.
# NOTE(review): these are unix-only commands (mkdir/cp/touch/ln); the
# `${CMAKE_COMMAND} -E` equivalents would be portable, but `touch -r` and
# `ln -sf` have no exact -E counterpart, so the shell form is kept.
add_custom_target(run_after_ttforge_csrc ALL
    COMMAND mkdir -p ${TTFORGE_VENV_DIR}/lib/${TTFORGE_PYTHON_VERSION}/site-packages/forge
    COMMAND cp $<TARGET_FILE:ttforge_csrc> ${TTFORGE_VENV_DIR}/lib/${TTFORGE_PYTHON_VERSION}/site-packages/forge/_C.so
    COMMAND touch -r $<TARGET_FILE:ttforge_csrc> ${TTFORGE_VENV_DIR}/lib/${TTFORGE_PYTHON_VERSION}/site-packages/forge/_C.so
    COMMAND ln -sf ${TTFORGE_VENV_DIR}/lib/${TTFORGE_PYTHON_VERSION}/site-packages/forge/_C.so ${CMAKE_SOURCE_DIR}/forge/forge/_C.so
    COMMENT "Running run_after_ttforge_csrc to copy the python module extension to forge directory"
    USES_TERMINAL
)

add_dependencies(run_after_ttforge_csrc ttforge_csrc)
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,7 @@ void autograd2_engine::create_backward_graph(const grad_map &requires_grad_map)
if (output->is_loss_output())
{
// Grad of loss is 1. Create constant and use that as "input".
py::object eval_module = py::module_::import("pybuda.op.eval");
py::object eval_module = py::module_::import("forge.op.eval");
auto const_tensor = make_shared_py_object(
eval_module.attr("create_constant_tensor_from_tensor")
(std::vector<float>{1.0}, node->shape().as_vector(), false, node->output_df()));
Expand Down
File renamed without changes.
40 changes: 40 additions & 0 deletions forge/csrc/autograd/binding.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
// SPDX-FileCopyrightText: © 2024 Tenstorrent AI ULC
//
// SPDX-License-Identifier: Apache-2.0
#include "autograd/binding.hpp"

#include <vector>

// Computes the output shape (and any implied dim broadcasts) for an op by
// delegating to the python shape function of the matching eval module.
// `is_buda` selects the buda-level module, whose shape function is also
// specialized for the tile geometry derived from `tile_dim`.
std::tuple<Shape, std::vector<DimBroadcast>> get_op_shape(OpType type, std::vector<Shape> &operands, bool is_buda, TileDim tile_dim)
{
    const int tile_height = tt::graphlib::get_row_size_from_tile_size(tile_dim);
    const int tile_width = tt::graphlib::get_col_size_from_tile_size(tile_dim);

    // Pick the eval module matching the IR level.
    py::module_ eval_module =
        py::module_::import(is_buda ? "forge.op.eval.buda" : "forge.op.eval.forge");
    py::function shape_fn;
    if (is_buda)
        shape_fn = eval_module.attr("get_f_forge_shape")(type, tile_height, tile_width);
    else
        shape_fn = eval_module.attr("get_f_forge_shape")(type);

    // Flatten operand shapes into plain vectors the python side can consume.
    std::vector<std::vector<std::uint32_t>> operand_dims;
    operand_dims.reserve(operands.size());
    for (Shape &operand : operands)
        operand_dims.push_back(operand.as_vector());

    py::tuple result = shape_fn(operand_dims);

    // Rebuild a Shape of the matching kind from the returned dimensions.
    std::vector<std::uint32_t> dims = result[0].cast<std::vector<std::uint32_t>>();
    Shape shape = is_buda ? Shape::create_buda(dims, tile_height, tile_width)
                          : Shape::create(dims);

    return std::make_tuple(shape, result[1].cast<std::vector<DimBroadcast>>());
}

// Inserts the backward (gradient) op for `type` into the autograd graph by
// delegating to the python backward function registered in forge.op.eval.forge,
// and returns the resulting node context.
NodeContext insert_backward(
    autograd_context context,
    OpType type,
    int operand,
    const std::vector<NodeContext> &inputs,
    NodeContext output,
    NodeContext gradient)
{
    // Look up the python backward implementation for this op type.
    py::module_ eval_module = py::module_::import("forge.op.eval.forge");
    py::object backward_fn = eval_module.attr("get_f_forge_backward")(type);

    // Python side builds the gradient op; cast the result back to a NodeContext.
    py::object result = backward_fn(context, operand, inputs, output, gradient);
    return result.cast<NodeContext>();
}

File renamed without changes.
Loading

0 comments on commit 117123b

Please sign in to comment.