Commit 5bede5b: Rename buda to forge (#221)
chandrasekaranpradeep authored Sep 4, 2024
1 parent fc211a7
Showing 396 changed files with 2,399 additions and 2,399 deletions.

4 changes: 2 additions & 2 deletions forge/csrc/CMakeLists.txt
@@ -46,9 +46,9 @@ add_subdirectory(tt_torch_device)

file(GLOB CPP_FILES
"forge_bindings.cpp"
-"buda_passes.cpp"
+"forge_passes.cpp"
"passes/*.cpp"
-"lower_to_buda/common.cpp"
+"lower_to_forge/common.cpp"
)

add_library(ttforge_csrc_objs OBJECT ${CPP_FILES})
8 changes: 4 additions & 4 deletions forge/csrc/autograd/binding.cpp
@@ -5,20 +5,20 @@

#include <vector>

-std::tuple<Shape, std::vector<DimBroadcast>> get_op_shape(OpType type, std::vector<Shape> &operands, bool is_buda, TileDim tile_dim)
+std::tuple<Shape, std::vector<DimBroadcast>> get_op_shape(OpType type, std::vector<Shape> &operands, bool is_forge, TileDim tile_dim)
{
int tile_height = tt::graphlib::get_row_size_from_tile_size(tile_dim);
int tile_width = tt::graphlib::get_col_size_from_tile_size(tile_dim);
-auto eval_module = is_buda ? py::module_::import("forge.op.eval.buda") : py::module_::import("forge.op.eval.forge");
-py::function forge_shape = is_buda ? eval_module.attr("get_f_forge_shape")(type, tile_height, tile_width)
+auto eval_module = is_forge ? py::module_::import("forge.op.eval.lforge") : py::module_::import("forge.op.eval.forge");
+py::function forge_shape = is_forge ? eval_module.attr("get_f_forge_shape")(type, tile_height, tile_width)
: eval_module.attr("get_f_forge_shape")(type);

std::vector<std::vector<std::uint32_t>> operand_tuples;
for(Shape &shape : operands)
operand_tuples.push_back(shape.as_vector());

py::tuple ret = forge_shape(operand_tuples);
-Shape s = is_buda ? Shape::create_buda(ret[0].cast<std::vector<std::uint32_t>>(), tile_height, tile_width) :
+Shape s = is_forge ? Shape::create_forge(ret[0].cast<std::vector<std::uint32_t>>(), tile_height, tile_width) :
Shape::create(ret[0].cast<std::vector<std::uint32_t>>());

return std::make_tuple(s, ret[1].cast<std::vector<DimBroadcast>>());
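The function renamed above dispatches to one of two Python op-eval modules at runtime based on a boolean flag. As a rough illustration of that pattern (not the commit's own code), here is a minimal sketch using pybind11's embedded interpreter; the module names "pkg.lowered" / "pkg.regular" and the module-level "shape" function are hypothetical stand-ins, not real forge APIs.

// Minimal sketch of the flag-based dispatch pattern above, using pybind11's
// embedded interpreter. Module and function names are hypothetical stand-ins.
#include <pybind11/embed.h>
#include <pybind11/stl.h>
#include <vector>

namespace py = pybind11;

std::vector<int> eval_shape(bool use_lowered, const std::vector<std::vector<int>> &operands)
{
    // Pick one of two Python implementations of the same interface at runtime.
    py::module_ eval = use_lowered ? py::module_::import("pkg.lowered")
                                   : py::module_::import("pkg.regular");
    // Call the module-level function and convert the Python result back to C++.
    py::object ret = eval.attr("shape")(operands);
    return ret.cast<std::vector<int>>();
}

int main()
{
    py::scoped_interpreter guard{};  // start the embedded Python interpreter
    // eval_shape(false, {{1, 32, 32}});  // would import "pkg.regular" if that module existed
    return 0;
}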
6 changes: 3 additions & 3 deletions forge/csrc/autograd/binding.hpp
@@ -22,13 +22,13 @@ using DimBroadcast = tt::graphlib::DimBroadcast;
using TileDim = tt::TileDim;

std::tuple<Shape, std::vector<DimBroadcast>> get_op_shape(
-OpType type, std::vector<Shape> &operands, bool is_buda, TileDim tile_dim = TileDim::Dim32x32);
-inline Shape get_tm_shape(OpType type, Shape operand, bool is_buda)
+OpType type, std::vector<Shape> &operands, bool is_forge, TileDim tile_dim = TileDim::Dim32x32);
+inline Shape get_tm_shape(OpType type, Shape operand, bool is_forge)
{
Shape shape;
std::vector<Shape> operands = {operand};
std::vector<DimBroadcast> bcast;
-std::tie(shape, bcast) = ::get_op_shape(type, operands, is_buda, operand.get_tile_dim());
+std::tie(shape, bcast) = ::get_op_shape(type, operands, is_forge, operand.get_tile_dim());
return shape;
}
NodeContext insert_backward(
4 changes: 2 additions & 2 deletions forge/csrc/autograd/python_bindings.cpp
@@ -11,10 +11,10 @@

namespace tt {

-static bool has_newstyle_interface(std::string const &op_name, bool is_buda)
+static bool has_newstyle_interface(std::string const &op_name, bool is_forge)
{
py::object eval_module =
-is_buda ? py::module_::import("forge.op.eval.buda") : py::module_::import("forge.op.eval.forge");
+is_forge ? py::module_::import("forge.op.eval.lforge") : py::module_::import("forge.op.eval.forge");
return eval_module.attr("has_newstyle_interface")(op_name).cast<bool>();
}

26 changes: 13 additions & 13 deletions forge/csrc/forge_bindings.cpp
@@ -16,10 +16,10 @@ namespace py = pybind11;

#include "autograd/python_bindings.hpp"
#include "backend_api/device_config.hpp"
-#include "buda_passes.hpp"
+#include "forge_passes.hpp"
#include "graph_lib/graph.hpp"
#include "graph_lib/python_bindings.hpp"
-#include "lower_to_buda/common.hpp"
+#include "lower_to_forge/common.hpp"
#include "passes/amp.hpp"
#include "passes/consteval.hpp"
#include "passes/fracture.hpp"
@@ -114,7 +114,7 @@ PYBIND11_MODULE(_C, m) {
py::module_ m_autograd = m.def_submodule("autograd", "Submodule defining autograd_engine.");
AutogradModule(m_autograd);

-py::module_ m_passes = m.def_submodule("passes", "API to Buda Passes");
+py::module_ m_passes = m.def_submodule("passes", "API to Forge Passes");
PassesModule(m_passes);

py::module_ m_torch_device = m.def_submodule("torch_device", "TT Torch Device");
@@ -162,8 +162,8 @@ PYBIND11_MODULE(_C, m) {
m.def("run_consteval_graph_pass", &passes::run_consteval_graph_pass);
m.def("run_post_autograd_graph_passes", &run_post_autograd_graph_passes);
m.def(
-"run_pre_placer_buda_passes",
-&run_pre_placer_buda_passes,
+"run_pre_placer_forge_passes",
+&run_pre_placer_forge_passes,
py::arg("graph"),
py::arg("device_config"),
py::arg("chip_ids") = std::vector<std::uint32_t>{0},
@@ -236,14 +236,14 @@ PYBIND11_MODULE(_C, m) {
.def_readonly("cols", &sparse::SparseCOO::cols)
.def_readonly("vals", &sparse::SparseCOO::vals);

-py::class_<tt::sparse::SparseBUDA>(m, "SparseBUDA")
-.def_readonly("sparse_indices", &sparse::SparseBUDA::sparse_indices)
-.def_readonly("sparse_shape", &sparse::SparseBUDA::sparse_shape)
-.def_readonly("zdim", &sparse::SparseBUDA::zdim)
-.def_readonly("bcast_factor", &sparse::SparseBUDA::bcast_factor)
-.def("get_sparse_tile_ptr_bits", &sparse::SparseBUDA::get_sparse_tile_ptr_bits)
-.def("get_sparse_ublock_idx_bits", &sparse::SparseBUDA::get_sparse_ublock_idx_bits)
-.def("get_sparse_tiles_and_encodings", [](tt::sparse::SparseBUDA &self, int grid_r) {
+py::class_<tt::sparse::SparseFORGE>(m, "SparseFORGE")
+.def_readonly("sparse_indices", &sparse::SparseFORGE::sparse_indices)
+.def_readonly("sparse_shape", &sparse::SparseFORGE::sparse_shape)
+.def_readonly("zdim", &sparse::SparseFORGE::zdim)
+.def_readonly("bcast_factor", &sparse::SparseFORGE::bcast_factor)
+.def("get_sparse_tile_ptr_bits", &sparse::SparseFORGE::get_sparse_tile_ptr_bits)
+.def("get_sparse_ublock_idx_bits", &sparse::SparseFORGE::get_sparse_ublock_idx_bits)
+.def("get_sparse_tiles_and_encodings", [](tt::sparse::SparseFORGE &self, int grid_r) {
return self.get_sparse_tiles_and_encodings(grid_r);
});

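Most of the changes in this bindings file come in pairs because pybind11 names the Python-visible symbol with a string that is independent of the C++ symbol it wraps, so the rename has to touch both. A minimal, self-contained sketch of that pattern follows; the toy struct and function are hypothetical stand-ins, not the real forge types.

// Hypothetical toy module illustrating why each rename appears twice:
// once in the Python-visible name string and once in the C++ symbol.
#include <pybind11/pybind11.h>

namespace py = pybind11;

namespace tt {
struct SparseFORGE { int zdim = 0; };                         // stand-in for tt::sparse::SparseFORGE
int run_pre_placer_forge_passes(int graph) { return graph; }  // stand-in pass entry point
}  // namespace tt

PYBIND11_MODULE(example, m) {
    // The first argument is what Python sees; the second is the C++ callable.
    m.def("run_pre_placer_forge_passes", &tt::run_pre_placer_forge_passes, py::arg("graph"));

    // Likewise, "SparseFORGE" is the Python class name; the template argument is the C++ type.
    py::class_<tt::SparseFORGE>(m, "SparseFORGE")
        .def(py::init<>())
        .def_readonly("zdim", &tt::SparseFORGE::zdim);
}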
12 changes: 6 additions & 6 deletions forge/csrc/buda_passes.cpp → forge/csrc/forge_passes.cpp
@@ -2,7 +2,7 @@
//
// SPDX-License-Identifier: Apache-2.0

-#include "buda_passes.hpp"
+#include "forge_passes.hpp"

#include <algorithm>
#include <map>
@@ -37,7 +37,7 @@
#include "passes/passes_utils.hpp"
#include "passes/post_autograd_graph_passes.hpp"
#include "passes/pre_lowering_passes.hpp"
-#include "passes/pre_placer_buda_passes.hpp"
+#include "passes/pre_placer_forge_passes.hpp"
#include "passes/print_graph.hpp"
#include "passes/replace_incommutable_patterns.hpp"
#include "passes/set_tile_dim.hpp"
@@ -61,7 +61,7 @@ void lower_reshape(Graph *, graphlib::OpNode *node)
{
graphlib::OpType op_type = node->op_type();
TT_ASSERT(op_type.attr.size() == 4);
-op_type.buda_attrs = {
+op_type.forge_attrs = {
{"w", std::get<int>(op_type.attr[0])},
{"z", std::get<int>(op_type.attr[1])},
{"r", std::get<int>(op_type.attr[2])},
@@ -234,7 +234,7 @@ graphlib::Graph* run_pre_lowering_passes(
}

// ********** Run lowering passes **********
-std::unique_ptr<graphlib::Graph> run_pre_placer_buda_passes(
+std::unique_ptr<graphlib::Graph> run_pre_placer_forge_passes(
graphlib::Graph *graph,
const DeviceConfig &device_config,
std::vector<std::uint32_t> chip_ids,
@@ -261,8 +261,8 @@ std::unique_ptr<graphlib::Graph> run_pre_placer_buda_passes(

passes::print_graph(graph, "PRE_PLACER");

-// Create buda ops / tms
-std::unique_ptr<graphlib::Graph> lowered_graph = lower_to_buda_ops(graph);
+// Create forge ops / tms
+std::unique_ptr<graphlib::Graph> lowered_graph = lower_to_forge_ops(graph);

// lower user-defined buffering queues to actual queue types
lower_to_buffering_queues(lowered_graph.get());
2 changes: 1 addition & 1 deletion forge/csrc/buda_passes.hpp → forge/csrc/forge_passes.hpp
@@ -26,7 +26,7 @@ std::vector<std::pair<graphlib::NodeId, graphlib::NodeId>> run_post_autograd_gra
graphlib::Graph *graph, py::object compiler_cfg_object);

// Run lowering passes
-std::unique_ptr<graphlib::Graph> run_pre_placer_buda_passes(
+std::unique_ptr<graphlib::Graph> run_pre_placer_forge_passes(
graphlib::Graph *graph,
const DeviceConfig &device_config,
std::vector<std::uint32_t> chip_ids = {0},
4 changes: 2 additions & 2 deletions forge/csrc/graph_lib/defines.hpp
@@ -24,8 +24,8 @@ enum NodeType
kInput,
kOutput,
kPyOp,
-kBudaOp,
-kBudaNaryTM,
+kForgeOp,
+kForgeNaryTM,
kQueue,
};

4 changes: 2 additions & 2 deletions forge/csrc/graph_lib/graph.cpp
@@ -477,8 +477,8 @@ void Graph::copy_node_attributes(Node *src, Node *dst)
dst->set_epoch_type(src->get_epoch_type());
dst->set_output_df(src->output_df());
if (
-((dst->node_type() == NodeType::kBudaOp) &&
-(src->node_type() == NodeType::kBudaOp) && src->as<OpNode>()->is_gradient_op()) ||
+((dst->node_type() == NodeType::kForgeOp) &&
+(src->node_type() == NodeType::kForgeOp) && src->as<OpNode>()->is_gradient_op()) ||
((dst->node_type() == NodeType::kPyOp) &&
(src->node_type() == NodeType::kPyOp) && src->as<OpNode>()->is_gradient_op())
)
4 changes: 2 additions & 2 deletions forge/csrc/graph_lib/graph.hpp
@@ -45,7 +45,7 @@ class EdgeAttributes;
enum class IRLevel
{
IR_TT_FORGE,
-IR_BUDA,
+IR_FORGE,
IR_CONSTEVAL,
};

@@ -85,7 +85,7 @@ class Graph
// instead of copy-constructor, prefer to explicitly provide a clone() method
// for clearer semantics and avoid accidental copy-constructor usage.
// Optionally pass your own graph ptr to clone into otherwise,
-// Returns a raw (buda-compatibility) pointer to heap-allocated deep copy of the input graph
+// Returns a raw (forge-compatibility) pointer to heap-allocated deep copy of the input graph
Graph *clone(Graph *cloned = nullptr) const;

IRLevel get_ir_level() const { return ir_level_; }
8 changes: 4 additions & 4 deletions forge/csrc/graph_lib/node.cpp
@@ -49,7 +49,7 @@ Shape Node::shape_of_operand(const Graph* graph, const Node* operand, bool ignor
if (ignore_broadcasts and tm.op == "broadcast")
continue;
std::vector<Shape> shapes = {operand_shape};
-std::tuple<Shape, std::vector<DimBroadcast>> shape_data = get_op_shape(tm, shapes, graph->get_ir_level() == IRLevel::IR_BUDA);
+std::tuple<Shape, std::vector<DimBroadcast>> shape_data = get_op_shape(tm, shapes, graph->get_ir_level() == IRLevel::IR_FORGE);
operand_shape = std::get<0>(shape_data);
TT_ASSERT(std::get<1>(shape_data).size() == 0, "TMs should not cause broadcasts");
}
@@ -89,7 +89,7 @@ void Node::clone(Node const* other, std::string const& name)

std::string Node::get_type() const
{
-if (node_type_ == NodeType::kPyOp or node_type_ == NodeType::kBudaOp) {
+if (node_type_ == NodeType::kPyOp or node_type_ == NodeType::kForgeOp) {
OpNode const* op = this->as<OpNode>();
return node_type_to_string(node_type_) + "::" + op->op_name();
} else {
@@ -117,8 +117,8 @@ std::string node_type_to_string(const NodeType& node_type)
case NodeType::kInput: return "Input";
case NodeType::kOutput: return "Output";
case NodeType::kQueue: return "Queue";
-case NodeType::kBudaOp: return "BudaOp";
-case NodeType::kBudaNaryTM: return "BudaNaryTM";
+case NodeType::kForgeOp: return "ForgeOp";
+case NodeType::kForgeNaryTM: return "ForgeNaryTM";
case NodeType::kPyOp: return "ForgeOp";
default: TT_ASSERT(false, "Invalid node type");
}
2 changes: 1 addition & 1 deletion forge/csrc/graph_lib/node.hpp
@@ -10,7 +10,7 @@
#include <functional>
#include "graph_lib/defines.hpp"
#include "graph_lib/shape.hpp"
-#include "lower_to_buda/common.hpp"
+#include "lower_to_forge/common.hpp"

namespace tt {

(Remaining file diffs in this commit are not loaded on this page.)