From f6b1ec4c48f426a3d52c769a937b2f6b16ed748c Mon Sep 17 00:00:00 2001 From: dgolubovic Date: Fri, 26 Jul 2024 13:59:31 +0000 Subject: [PATCH] rename pybuda to tt_forge starting from .hpp files --- pybuda/csrc/buda_passes.cpp | 6 +++--- pybuda/csrc/graph_lib/graph.hpp | 2 +- pybuda/csrc/graph_lib/node.cpp | 6 +++--- pybuda/csrc/graph_lib/node.hpp | 6 +++--- pybuda/csrc/graph_lib/node_types.hpp | 2 +- pybuda/csrc/graph_lib/python_bindings.cpp | 17 ++++++++-------- pybuda/csrc/graph_lib/utils.cpp | 2 +- pybuda/csrc/passes/decomposing_context.cpp | 2 +- pybuda/csrc/passes/decomposing_context.hpp | 2 +- pybuda/csrc/passes/lowering_context.hpp | 2 +- pybuda/csrc/passes/pre_placer_buda_passes.cpp | 4 ++-- pybuda/csrc/passes/pre_placer_buda_passes.hpp | 2 +- .../passes/tests/test_erase_inverse_ops.cpp | 2 +- .../test_erase_unnecessary_4d_tm_sequence.cpp | 2 +- .../passes/tests/test_fuse_pad_conv2d.cpp | 2 +- ...transpose_pairs_into_slice_or_stack_tm.cpp | 8 ++++---- .../passes/tests/test_link_past_cache_ios.cpp | 10 +++++----- .../tests/test_move_index_to_mm_weights.cpp | 4 ++-- ...test_move_select_after_matmul_optional.cpp | 2 +- .../tests/test_past_cache_ublock_order.cpp | 2 +- pybuda/csrc/test/common.hpp | 2 +- .../csrc/tt_torch_device/python_bindings.cpp | 10 +++++----- pybuda/csrc/tt_torch_device/tt_device.cpp | 2 +- pybuda/csrc/tt_torch_device/tt_device.hpp | 20 +++++++++---------- pybuda/pybuda/_C/torch_device.pyi | 10 +++++----- pybuda/pybuda/torch_compile.py | 10 +++++----- utils/signal_handlers.hpp | 6 +++--- 27 files changed, 73 insertions(+), 72 deletions(-) diff --git a/pybuda/csrc/buda_passes.cpp b/pybuda/csrc/buda_passes.cpp index 4c5dddf0c..7d71cf614 100644 --- a/pybuda/csrc/buda_passes.cpp +++ b/pybuda/csrc/buda_passes.cpp @@ -90,7 +90,7 @@ run_post_initial_graph_passes(graphlib::Graph *graph, py::object compiler_cfg_ob passes::explicate_unsqueeze(graph); passes::fuse_conv2d_bias(graph); - auto inserted_node_id_mapping = decompose_pybuda_graph(graph, 
"get_f_pybuda_decompose", compiler_cfg); + auto inserted_node_id_mapping = decompose_tt_forge_graph(graph, "get_f_pybuda_decompose", compiler_cfg); auto chip_id_assignments = passes::fracture(graph, fracture_groups); return std::make_tuple(inserted_node_id_mapping, chip_id_assignments); } @@ -176,7 +176,7 @@ std::vector> run_post_optimize_dec std::shared_ptr compiler_cfg = make_shared_py_object(compiler_cfg_object); passes::print_graph(graph, "POST_OPTIMIZE"); - auto inserted_node_id_mapping = decompose_pybuda_graph(graph, "get_f_pybuda_decompose_post_optimize", compiler_cfg); + auto inserted_node_id_mapping = decompose_tt_forge_graph(graph, "get_f_pybuda_decompose_post_optimize", compiler_cfg); return inserted_node_id_mapping; } @@ -189,7 +189,7 @@ std::vector> run_post_autograd_gra passes::print_graph(graph, "POST_AUTOGRAD"); lower_bwd_gather_ops(graph); - return decompose_pybuda_graph(graph, "get_f_pybuda_decompose_post_autograd", compiler_cfg); + return decompose_tt_forge_graph(graph, "get_f_pybuda_decompose_post_autograd", compiler_cfg); } // ********** Run pre-lowering passes ********** diff --git a/pybuda/csrc/graph_lib/graph.hpp b/pybuda/csrc/graph_lib/graph.hpp index 92ae66a74..9c4abf364 100644 --- a/pybuda/csrc/graph_lib/graph.hpp +++ b/pybuda/csrc/graph_lib/graph.hpp @@ -44,7 +44,7 @@ class EdgeAttributes; enum class IRLevel { - IR_PYBUDA, + IR_TT_FORGE, IR_BUDA, IR_CONSTEVAL, }; diff --git a/pybuda/csrc/graph_lib/node.cpp b/pybuda/csrc/graph_lib/node.cpp index 3aadf10b4..6ccdf6812 100644 --- a/pybuda/csrc/graph_lib/node.cpp +++ b/pybuda/csrc/graph_lib/node.cpp @@ -19,12 +19,12 @@ NodeId Node::id() const { TT_ASSERT(unique_id_ >= 0); return unique_id_; } -NodeId Node::pybuda_id() const { - return pybuda_id_; +NodeId Node::tt_forge_id() const { + return tt_forge_id_; } void Node::set_id(NodeId node_id) { unique_id_ = node_id; } -void Node::set_pybuda_id(NodeId node_id) { pybuda_id_ = node_id; } +void Node::set_tt_forge_id(NodeId node_id) { tt_forge_id_ = 
node_id; } const std::string& Node::name() const { return name_; } void Node::set_name(const std::string& name) { name_ = name; } diff --git a/pybuda/csrc/graph_lib/node.hpp b/pybuda/csrc/graph_lib/node.hpp index 01889f385..214e13be2 100644 --- a/pybuda/csrc/graph_lib/node.hpp +++ b/pybuda/csrc/graph_lib/node.hpp @@ -44,7 +44,7 @@ class Node { private: std::string name_; NodeId unique_id_ = -1; - NodeId pybuda_id_ = -1; + NodeId tt_forge_id_ = -1; int padding_id = 0; @@ -59,9 +59,9 @@ class Node { virtual ~Node() = default; NodeId id() const; - NodeId pybuda_id() const; + NodeId tt_forge_id() const; void set_id(NodeId node_id); - void set_pybuda_id(NodeId node_id); + void set_tt_forge_id(NodeId node_id); const std::string& name() const; void set_name(const std::string& name); diff --git a/pybuda/csrc/graph_lib/node_types.hpp b/pybuda/csrc/graph_lib/node_types.hpp index cf62fb3ae..342b82955 100644 --- a/pybuda/csrc/graph_lib/node_types.hpp +++ b/pybuda/csrc/graph_lib/node_types.hpp @@ -438,7 +438,7 @@ class OpNode : public TaggedNode { return py_attr(attr_name)(args...).template cast(); } - IRLevel get_ir_level() const { return (node_type() == NodeType::kPyOp) ? IRLevel::IR_PYBUDA : IRLevel::IR_BUDA; } + IRLevel get_ir_level() const { return (node_type() == NodeType::kPyOp) ? 
IRLevel::IR_TT_FORGE : IRLevel::IR_BUDA; } const std::string &op_name() const { return op_type_.op; } const std::vector &op_attrs() const { return op_type_.attr; } void overwrite_op_attrs(std::vector op_attrs) { op_type_.attr = op_attrs; } diff --git a/pybuda/csrc/graph_lib/python_bindings.cpp b/pybuda/csrc/graph_lib/python_bindings.cpp index a60263f71..2202d456b 100644 --- a/pybuda/csrc/graph_lib/python_bindings.cpp +++ b/pybuda/csrc/graph_lib/python_bindings.cpp @@ -58,7 +58,7 @@ py::object eval_input_bw(Node *node, py::object inputs, bool is_buda); void GraphModule(py::module &m_graph) { py::class_(m_graph, "Graph") - .def(py::init([](std::string name) { return std::make_unique(graphlib::IRLevel::IR_PYBUDA, name); })) + .def(py::init([](std::string name) { return std::make_unique(graphlib::IRLevel::IR_TT_FORGE, name); })) .def("clone", [](Graph &self) { return self.clone(); }) .def("get_node_name", [](const Graph &self, const graphlib::NodeId id) { return self.node_by_id(id)->name(); }) .def("get_name", [](const Graph &self) { return self.name(); }) @@ -618,7 +618,7 @@ py::object eval_op(graphlib::OpType type, std::vector inputs, graphl py::object eval_module; switch (ir_level) { - case graphlib::IRLevel::IR_PYBUDA: eval_module = py::module_::import("pybuda.op.eval.pybuda"); break; + case graphlib::IRLevel::IR_TT_FORGE: eval_module = py::module_::import("pybuda.op.eval.pybuda"); break; case graphlib::IRLevel::IR_BUDA: eval_module = py::module_::import("pybuda.op.eval.buda"); break; case graphlib::IRLevel::IR_CONSTEVAL: eval_module = py::module_::import("pybuda.op.eval.pybuda"); break; } @@ -670,7 +670,7 @@ py::object eval_relu(py::object tensor, graphlib::OpType type) : "min"; graphlib::OpType relu("relu", {relu_threshold, relu_mode}); - tensor = eval_op(relu, inputs, graphlib::IRLevel::IR_PYBUDA); + tensor = eval_op(relu, inputs, graphlib::IRLevel::IR_TT_FORGE); } return tensor; } @@ -699,7 +699,7 @@ py::object eval_golden_transforms(graphlib::Node *node, 
py::object tensor, bool // if (!eval_for_output || (op_type.op != "reshape" && op_type.op != "transpose")) { - tensor = eval_op(op_type, {tensor}, graphlib::IRLevel::IR_PYBUDA); + tensor = eval_op(op_type, {tensor}, graphlib::IRLevel::IR_TT_FORGE); } } @@ -718,7 +718,7 @@ void eval_partial_datacopy_golden_transforms( for (auto const &op_type : golden_transforms) { - output_tensor = eval_op(op_type, {output_tensor}, graphlib::IRLevel::IR_PYBUDA); + output_tensor = eval_op(op_type, {output_tensor}, graphlib::IRLevel::IR_TT_FORGE); } if (ret.at(output_index).ptr() == nullptr) @@ -728,7 +728,8 @@ void eval_partial_datacopy_golden_transforms( else { graphlib::OpType overlay("add"); - ret.at(output_index) = eval_op(overlay, {ret.at(output_index), output_tensor}, graphlib::IRLevel::IR_PYBUDA); + ret.at(output_index) = eval_op(overlay, {ret.at(output_index), output_tensor}, graphlib::IRLevel::IR_TT_FORGE + ); } } @@ -1453,7 +1454,7 @@ eval_graph( } } - auto golden_node_id = (graph->get_ir_level() == graphlib::IRLevel::IR_BUDA) ? node->pybuda_id() : node->id(); + auto golden_node_id = (graph->get_ir_level() == graphlib::IRLevel::IR_BUDA) ? node->tt_forge_id() : node->id(); if (op_node->has_golden_id()) { golden_node_id = op_node->golden_id(); // if a different intermediate node is used as a reference... } @@ -1468,7 +1469,7 @@ eval_graph( // Check if there's a gradient to check if (gradient_edges.size() > 0) { Node* producer = graph->node_by_id(gradient_edges.at(0).producer_node_id); - auto node_id = (graph->get_ir_level() == graphlib::IRLevel::IR_BUDA) ? producer->pybuda_id() : producer->id(); + auto node_id = (graph->get_ir_level() == graphlib::IRLevel::IR_BUDA) ? 
producer->tt_forge_id() : producer->id(); auto golden_fwd = intermediate_golden_tensors.find(node_id); if (golden_fwd != intermediate_golden_tensors.end()) { bool is_valid = is_gradient_comparison_valid(graph, gradient_edges.at(0)); diff --git a/pybuda/csrc/graph_lib/utils.cpp b/pybuda/csrc/graph_lib/utils.cpp index df4628a5b..270b9d3fd 100644 --- a/pybuda/csrc/graph_lib/utils.cpp +++ b/pybuda/csrc/graph_lib/utils.cpp @@ -1946,7 +1946,7 @@ std::unique_ptr ConstEvalGraph::promote_node( Graph *runtime_graph, Node *runtime_node, std::unique_ptr &&consteval_node_free) { TT_ASSERT(not runtime_graph or runtime_node); - TT_ASSERT(not runtime_graph or runtime_graph->get_ir_level() == IRLevel::IR_PYBUDA); + TT_ASSERT(not runtime_graph or runtime_graph->get_ir_level() == IRLevel::IR_TT_FORGE); graph_updated_since_autograd = true; diff --git a/pybuda/csrc/passes/decomposing_context.cpp b/pybuda/csrc/passes/decomposing_context.cpp index 59546df06..672cf3c19 100644 --- a/pybuda/csrc/passes/decomposing_context.cpp +++ b/pybuda/csrc/passes/decomposing_context.cpp @@ -158,7 +158,7 @@ NodeContext DecomposingContext::tensor(std::shared_ptr tensor, graphlib::S return NodeContext(new_node); } -std::vector> decompose_pybuda_graph( +std::vector> decompose_tt_forge_graph( Graph *graph, const char *dispatcher_name, std::shared_ptr compiler_cfg) { std::vector> inserted_node_id_mapping; diff --git a/pybuda/csrc/passes/decomposing_context.hpp b/pybuda/csrc/passes/decomposing_context.hpp index fa033ce2d..9de48bf88 100644 --- a/pybuda/csrc/passes/decomposing_context.hpp +++ b/pybuda/csrc/passes/decomposing_context.hpp @@ -64,7 +64,7 @@ class DecomposingContext inline std::shared_ptr get_compiler_cfg() { return compiler_cfg; } }; -std::vector> decompose_pybuda_graph( +std::vector> decompose_tt_forge_graph( Graph* graph, const char* dispatcher_name, std::shared_ptr compiler_cfg); } // namespace tt diff --git a/pybuda/csrc/passes/lowering_context.hpp b/pybuda/csrc/passes/lowering_context.hpp 
index 18f0d9be7..56b2e4e38 100644 --- a/pybuda/csrc/passes/lowering_context.hpp +++ b/pybuda/csrc/passes/lowering_context.hpp @@ -9,7 +9,7 @@ namespace tt { -// Lowering context provide an API for Python to create lowered Buda ops, given a PyBuda op and +// Lowering context provide an API for Python to create lowered Buda ops, given a TTForge op and // its operands. using Graph = graphlib::Graph; using Node = graphlib::Node; diff --git a/pybuda/csrc/passes/pre_placer_buda_passes.cpp b/pybuda/csrc/passes/pre_placer_buda_passes.cpp index a55998065..66b43cf90 100644 --- a/pybuda/csrc/passes/pre_placer_buda_passes.cpp +++ b/pybuda/csrc/passes/pre_placer_buda_passes.cpp @@ -1142,7 +1142,7 @@ void constant_pre_broadcast(Graph *graph) } } -// Convert PyBuda graph to Buda graph +// Convert TTForge graph to Buda graph std::unique_ptr lower_to_buda_ops(Graph *graph) { @@ -1187,7 +1187,7 @@ std::unique_ptr lower_to_buda_ops(Graph *graph) Node* old_node = graph->node_by_id(old_node_id); Node* new_node = old_to_new.at(old_node); - new_node->set_pybuda_id(old_node->id()); + new_node->set_tt_forge_id(old_node->id()); copy_operand_edges_to_new_graph(graph, new_graph.get(), old_node, new_node, old_to_new, true, true); if (old_node->node_type() == NodeType::kPyOp and new_node->node_type() == NodeType::kPyOp) diff --git a/pybuda/csrc/passes/pre_placer_buda_passes.hpp b/pybuda/csrc/passes/pre_placer_buda_passes.hpp index a703e4e9c..72b94b281 100644 --- a/pybuda/csrc/passes/pre_placer_buda_passes.hpp +++ b/pybuda/csrc/passes/pre_placer_buda_passes.hpp @@ -68,7 +68,7 @@ void validate_buffering_queues(graphlib::Graph *graph); void lower_fallback_data_formats(graphlib::Graph *graph, DataFormat fp32_fallback, bool fp32_acc_supported); -// Convert PyBuda graph to Buda graph +// Convert TTForge graph to Buda graph std::unique_ptr lower_to_buda_ops(Graph *graph); void apply_math_fidelity(graphlib::Graph *graph, const MathFidelity default_math_fidelity); diff --git 
a/pybuda/csrc/passes/tests/test_erase_inverse_ops.cpp b/pybuda/csrc/passes/tests/test_erase_inverse_ops.cpp index 572fec45e..8cb17ed31 100644 --- a/pybuda/csrc/passes/tests/test_erase_inverse_ops.cpp +++ b/pybuda/csrc/passes/tests/test_erase_inverse_ops.cpp @@ -16,7 +16,7 @@ struct EraseInverseOps : testing::Test EraseInverseOps() { // Two transposes feeding into eltwise which has a transpose after it - graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); graphlib::Shape shape = graphlib::Shape::create({1, 1, 512, 160}); graphlib::Shape shapeT = graphlib::Shape::create({1, 1, 160, 512}); diff --git a/pybuda/csrc/passes/tests/test_erase_unnecessary_4d_tm_sequence.cpp b/pybuda/csrc/passes/tests/test_erase_unnecessary_4d_tm_sequence.cpp index c5a8982b4..0de14959d 100644 --- a/pybuda/csrc/passes/tests/test_erase_unnecessary_4d_tm_sequence.cpp +++ b/pybuda/csrc/passes/tests/test_erase_unnecessary_4d_tm_sequence.cpp @@ -16,7 +16,7 @@ struct EraseUnnecessary4DSeq : testing::Test EraseUnnecessary4DSeq() { // Initialize graph - graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); // Graph definition auto input_0 = create_input(*graph, "input_0", graphlib::Shape::create({1, NumOperands*58, 64, 64})); diff --git a/pybuda/csrc/passes/tests/test_fuse_pad_conv2d.cpp b/pybuda/csrc/passes/tests/test_fuse_pad_conv2d.cpp index 6a1aa65b7..4ced87d63 100644 --- a/pybuda/csrc/passes/tests/test_fuse_pad_conv2d.cpp +++ b/pybuda/csrc/passes/tests/test_fuse_pad_conv2d.cpp @@ -14,7 +14,7 @@ struct FusePadConv2d : testing::Test FusePadConv2d() { // Two transposes feeding into eltwise which has a transpose after it - graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); auto in0_a = create_input(*graph, "in0_a", graphlib::Shape::create({1, 3, 513, 513})); auto param0 = create_input(*graph, 
"param1", graphlib::Shape::create({32, 3, 3, 3}), graphlib::InputNodeType::Parameter); diff --git a/pybuda/csrc/passes/tests/test_fuse_reshape_transpose_pairs_into_slice_or_stack_tm.cpp b/pybuda/csrc/passes/tests/test_fuse_reshape_transpose_pairs_into_slice_or_stack_tm.cpp index 2f109ded1..6f54dd4e8 100644 --- a/pybuda/csrc/passes/tests/test_fuse_reshape_transpose_pairs_into_slice_or_stack_tm.cpp +++ b/pybuda/csrc/passes/tests/test_fuse_reshape_transpose_pairs_into_slice_or_stack_tm.cpp @@ -14,7 +14,7 @@ struct FuseReshapeTransposeIntoHSlice : testing::Test FuseReshapeTransposeIntoHSlice() { // Initialize graph - graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); // Graph definition auto input_0 = create_input(*graph, "input_0", graphlib::Shape::create({1, 1, 1, 32})); @@ -203,7 +203,7 @@ struct FuseTransposeReshapeIntoHStack : testing::Test FuseTransposeReshapeIntoHStack() { // Initialize graph - graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); // Graph definition auto input_0 = create_input(*graph, "input_0", graphlib::Shape::create({1, 64, 1, 32})); @@ -425,7 +425,7 @@ struct FuseReshapeIntoVSlice : testing::Test FuseReshapeIntoVSlice() { // Initialize graph - graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); // Graph definition auto input_0 = create_input(*graph, "input_0", graphlib::Shape::create({1, 32, 2048, 32})); @@ -468,7 +468,7 @@ struct FuseReshapeIntoVStack : testing::Test FuseReshapeIntoVStack() { // Initialize graph - graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); // Graph definition auto input_0 = create_input(*graph, "input_0", graphlib::Shape::create({1, 1024, 64, 32})); diff --git a/pybuda/csrc/passes/tests/test_link_past_cache_ios.cpp 
b/pybuda/csrc/passes/tests/test_link_past_cache_ios.cpp index 8136a74dc..15ae6f120 100644 --- a/pybuda/csrc/passes/tests/test_link_past_cache_ios.cpp +++ b/pybuda/csrc/passes/tests/test_link_past_cache_ios.cpp @@ -16,7 +16,7 @@ struct WhisperPastCacheBase : testing::Test WhisperPastCacheBase() { // Initialize graph - graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); // Graph definition // define input/weight nodes @@ -95,7 +95,7 @@ struct WhisperPastCacheSubGraph : testing::Test WhisperPastCacheSubGraph() { // Initialize graph - graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); // Graph definition // define input/weight nodes @@ -173,7 +173,7 @@ struct T5PastCacheRotate : testing::Test T5PastCacheRotate() { // Initialize graph - graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); // Graph definition // define input/weight nodes @@ -278,7 +278,7 @@ struct Falcon40bPastCache : testing::Test Falcon40bPastCache() { // Initialize graph - graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); // Graph definition // define input/weight nodes @@ -378,7 +378,7 @@ struct Fuyu8bPastCache : testing::Test Fuyu8bPastCache() { // Initialize graph - graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); // Graph definition // define input/weight nodes diff --git a/pybuda/csrc/passes/tests/test_move_index_to_mm_weights.cpp b/pybuda/csrc/passes/tests/test_move_index_to_mm_weights.cpp index 6cee13c17..1f0389eb3 100644 --- a/pybuda/csrc/passes/tests/test_move_index_to_mm_weights.cpp +++ b/pybuda/csrc/passes/tests/test_move_index_to_mm_weights.cpp @@ -14,7 +14,7 @@ struct Gpt2Split : testing::Test Gpt2Split() { // Initialize graph - graph = new 
graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); // Graph definition auto weight = create_input(*graph, "weight", graphlib::Shape::create({768, 2304}), graphlib::InputNodeType::Parameter); @@ -86,7 +86,7 @@ struct Fuyu8bSplit : testing::Test Fuyu8bSplit() { // Initialize graph - graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); // Graph definition auto weight = create_input(*graph, "weight", graphlib::Shape::create({12288, 4096}), graphlib::InputNodeType::Parameter); diff --git a/pybuda/csrc/passes/tests/test_move_select_after_matmul_optional.cpp b/pybuda/csrc/passes/tests/test_move_select_after_matmul_optional.cpp index b83f793eb..06fb2deea 100644 --- a/pybuda/csrc/passes/tests/test_move_select_after_matmul_optional.cpp +++ b/pybuda/csrc/passes/tests/test_move_select_after_matmul_optional.cpp @@ -15,7 +15,7 @@ struct MoveSelectAfterMatmulOptional : testing::Test MoveSelectAfterMatmulOptional() { - graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); // define input/param/const auto act = create_input(*graph, "act", graphlib::Shape::create({1, 1, 6144, 21632})); diff --git a/pybuda/csrc/passes/tests/test_past_cache_ublock_order.cpp b/pybuda/csrc/passes/tests/test_past_cache_ublock_order.cpp index 60bab386f..946557bce 100644 --- a/pybuda/csrc/passes/tests/test_past_cache_ublock_order.cpp +++ b/pybuda/csrc/passes/tests/test_past_cache_ublock_order.cpp @@ -14,7 +14,7 @@ struct PastCache : testing::Test PastCache() { // Initialize graph - graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA); + graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE); // Graph definition auto pcache = create_input(*graph, "pcache", graphlib::Shape::create({1, 1, 416, 384}), graphlib::InputNodeType::Parameter); diff --git a/pybuda/csrc/test/common.hpp b/pybuda/csrc/test/common.hpp 
index 6ce95b8ac..3dc23f97f 100644 --- a/pybuda/csrc/test/common.hpp +++ b/pybuda/csrc/test/common.hpp @@ -277,7 +277,7 @@ class GraphTest : public ::testing::Test std::unordered_map op_name_id; }; -using PybudaGraphTest = GraphTest<graphlib::IRLevel::IR_PYBUDA>; +using PybudaGraphTest = GraphTest<graphlib::IRLevel::IR_TT_FORGE>; using BudaGraphTest = GraphTest<graphlib::IRLevel::IR_BUDA>; inline DeviceConfig create_device_config( diff --git a/pybuda/csrc/tt_torch_device/python_bindings.cpp b/pybuda/csrc/tt_torch_device/python_bindings.cpp index bf86a31f7..832059cc4 100644 --- a/pybuda/csrc/tt_torch_device/python_bindings.cpp +++ b/pybuda/csrc/tt_torch_device/python_bindings.cpp @@ -14,7 +14,7 @@ void TorchDeviceModule(py::module &m_torch_device) m_torch_device.def("get_default_device", &tt::get_default_tt_device, py::return_value_policy::reference); m_torch_device.def("get_available_devices", []() { return tt::get_available_tt_devices(); }); - py::class_<tt::PyBudaTensorDesc>(m_torch_device, "PyBudaTensorDesc") + py::class_<tt::TTForgeTensorDesc>(m_torch_device, "TTForgeTensorDesc") .def( py::init< std::string const&, @@ -25,10 +25,10 @@ void TorchDeviceModule(py::module &m_torch_device) py::arg("shape"), py::arg("ptr") = -1, py::arg("constant") = std::nullopt) - .def_readonly("name", &tt::PyBudaTensorDesc::name) - .def_readonly("shape", &tt::PyBudaTensorDesc::shape) - .def_readonly("ptr", &tt::PyBudaTensorDesc::ptr) - .def_readonly("constant", &tt::PyBudaTensorDesc::constant); + .def_readonly("name", &tt::TTForgeTensorDesc::name) + .def_readonly("shape", &tt::TTForgeTensorDesc::shape) + .def_readonly("ptr", &tt::TTForgeTensorDesc::ptr) + .def_readonly("constant", &tt::TTForgeTensorDesc::constant); py::class_>(m_torch_device, "Workload") .def_readonly("inputs", &tt::Workload::inputs) diff --git a/pybuda/csrc/tt_torch_device/tt_device.cpp b/pybuda/csrc/tt_torch_device/tt_device.cpp index 72a87c20e..9922674c8 100644 --- a/pybuda/csrc/tt_torch_device/tt_device.cpp +++ b/pybuda/csrc/tt_torch_device/tt_device.cpp @@ -208,7 +208,7 @@ std::vector dispatch( const auto& subgraph_outputs =
workload->outputs.at(program_idx); for (auto const& output : outputs) { - PyBudaTensorDesc const& desc = subgraph_outputs.at(output_idx ); + TTForgeTensorDesc const& desc = subgraph_outputs.at(output_idx ); std::string runtime_transform = device.output_runtime_transforms.at(program_idx).at(output_idx ); // auto impl = output.unsafeGetTensorImpl(); diff --git a/pybuda/csrc/tt_torch_device/tt_device.hpp b/pybuda/csrc/tt_torch_device/tt_device.hpp index e638b908b..af18ef0f8 100644 --- a/pybuda/csrc/tt_torch_device/tt_device.hpp +++ b/pybuda/csrc/tt_torch_device/tt_device.hpp @@ -35,14 +35,14 @@ struct TTMetaData : public c10::BackendMeta { int unique_output_id = -1; }; -struct PyBudaTensorDesc +struct TTForgeTensorDesc { std::string name; std::vector shape; int ptr = -1; std::optional constant; - PyBudaTensorDesc( + TTForgeTensorDesc( std::string name, std::vector shape, int ptr, @@ -58,19 +58,19 @@ using Program = int; struct Workload { std::shared_ptr flatbuffer; - std::map<int, std::vector<PyBudaTensorDesc>> inputs; - std::vector<PyBudaTensorDesc> constants; - std::vector<PyBudaTensorDesc> parameters; - std::map<int, std::vector<PyBudaTensorDesc>> outputs; + std::map<int, std::vector<TTForgeTensorDesc>> inputs; + std::vector<TTForgeTensorDesc> constants; + std::vector<TTForgeTensorDesc> parameters; + std::map<int, std::vector<TTForgeTensorDesc>> outputs; bool initialized = false; std::unordered_map subgraph_link_tensor_populated; Workload( std::shared_ptr flatbuffer, - std::map<int, std::vector<PyBudaTensorDesc>> const& inputs, // a vector per program - std::vector<PyBudaTensorDesc> const& constants, - std::vector<PyBudaTensorDesc> const& parameters, - std::map<int, std::vector<PyBudaTensorDesc>> const& outputs) : + std::map<int, std::vector<TTForgeTensorDesc>> const& inputs, // a vector per program + std::vector<TTForgeTensorDesc> const& constants, - std::vector<TTForgeTensorDesc> const& parameters, + std::map<int, std::vector<TTForgeTensorDesc>> const& outputs) : flatbuffer(flatbuffer), inputs(inputs), constants(constants), diff --git a/pybuda/pybuda/_C/torch_device.pyi b/pybuda/pybuda/_C/torch_device.pyi index b1e9ac4b7..34ebd6954 100644 --- a/pybuda/pybuda/_C/torch_device.pyi +++ b/pybuda/pybuda/_C/torch_device.pyi @@ -1,7 +1,7 @@ import pybuda._C import torch -class PyBudaTensorDesc: +class TTForgeTensorDesc: def __init__(self, name: str, shape: list[int], ptr: int = ..., constant:
torch.Tensor | None = ...) -> None: ... @property def constant(self) -> torch.Tensor | None: ... @@ -33,13 +33,13 @@ class TTDevice: class Workload: def __init__(self, *args, **kwargs) -> None: ... @property - def constants(self) -> list[PyBudaTensorDesc]: ... + def constants(self) -> list[TTForgeTensorDesc]: ... @property - def inputs(self) -> dict[int, list[PyBudaTensorDesc]]: ... + def inputs(self) -> dict[int, list[TTForgeTensorDesc]]: ... @property - def outputs(self) -> dict[int, list[PyBudaTensorDesc]]: ... + def outputs(self) -> dict[int, list[TTForgeTensorDesc]]: ... @property - def parameters(self) -> list[PyBudaTensorDesc]: ... + def parameters(self) -> list[TTForgeTensorDesc]: ... def get_available_devices(*args, **kwargs): ... def get_default_device(*args, **kwargs): ... diff --git a/pybuda/pybuda/torch_compile.py b/pybuda/pybuda/torch_compile.py index d758ddf11..496044ca0 100644 --- a/pybuda/pybuda/torch_compile.py +++ b/pybuda/pybuda/torch_compile.py @@ -12,7 +12,7 @@ from loguru import logger -from pybuda._C.torch_device import get_default_device, unique_id, PyBudaTensorDesc +from pybuda._C.torch_device import get_default_device, unique_id, TTForgeTensorDesc from pybuda.compiled_graph_state import CompiledGraphState from pybuda.fx.capture import CaptureFX from pybuda.fx.schedule import TensorSource @@ -103,7 +103,7 @@ def _build_backend_compile_request(device, compiler_cfg, compiled_graph_state, s for program_id in program_ids: graph_idx = MixedGraph.get_program_subgraph_id(subgraph_idx, program_id) program_inputs = [ - PyBudaTensorDesc(name, shape) + TTForgeTensorDesc(name, shape) for name, shape in zip( compiled_graph_state.get_ordered_input_names_for_subgraph(graph_idx), compiled_graph_state.get_ordered_input_shapes_for_subgraph(graph_idx) ) @@ -130,7 +130,7 @@ def _build_backend_compile_request(device, compiler_cfg, compiled_graph_state, s # input_tile_bcast_dims[i] = compiled_graph_state.get_ordered_input_tile_broadcast_dims_for_subgraph(i) 
constants = [ - PyBudaTensorDesc( + TTForgeTensorDesc( name, constant.shape, constant=constant, @@ -139,7 +139,7 @@ def _build_backend_compile_request(device, compiler_cfg, compiled_graph_state, s ] parameters = [ - PyBudaTensorDesc(name, param.shape, ptr=0) + TTForgeTensorDesc(name, param.shape, ptr=0) for name, param in compiled_graph_state.post_const_eval_parameters.items() ] @@ -147,7 +147,7 @@ def _build_backend_compile_request(device, compiler_cfg, compiled_graph_state, s for program_id in program_ids: graph_idx = MixedGraph.get_program_subgraph_id(subgraph_idx, program_id) program_outputs = [ - PyBudaTensorDesc(name, shape) + TTForgeTensorDesc(name, shape) for name, shape in zip( compiled_graph_state.get_ordered_output_names_for_subgraph(graph_idx), compiled_graph_state.get_ordered_output_shapes_for_subgraph(graph_idx) ) diff --git a/utils/signal_handlers.hpp b/utils/signal_handlers.hpp index 5320cd0ca..3b982bef2 100644 --- a/utils/signal_handlers.hpp +++ b/utils/signal_handlers.hpp @@ -10,7 +10,7 @@ #include "utils/assert.hpp" #include "runtime/tt_device.hpp" -inline void pybuda_signal_handler(int sig) +inline void tt_forge_signal_handler(int sig) { std::string signal_name; switch (sig) @@ -32,7 +32,7 @@ inline void pybuda_signal_handler(int sig) break; } - std::cerr << "pybuda_signal_handler - signal: " << sig << " (" << signal_name << ")" << std::endl; + std::cerr << "tt_forge_signal_handler - signal: " << sig << " (" << signal_name << ")" << std::endl; std::cerr << "stacktrace: " << std::endl; std::vector bt = tt::assert::backtrace(100, 0); @@ -77,7 +77,7 @@ class SignalHandlers // to print the stacktrace before the program crashes. for (auto sig : {SIGSEGV, SIGILL, SIGFPE, SIGABRT}) { - if (std::signal(sig, pybuda_signal_handler) == SIG_ERR) + if (std::signal(sig, tt_forge_signal_handler) == SIG_ERR) { std::cerr << "Failed to register signal handler for signal " << sig << std::endl; }