rename pybuda to tt_forge starting from .hpp files
dgolubovicTT committed Jul 29, 2024
1 parent 5a04920 · commit f6b1ec4
Showing 27 changed files with 73 additions and 72 deletions.
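The rename is mechanical, but it touches several identifiers that other code and the Python bindings refer to. Python-facing names are left alone in this change: the eval modules are still imported as pybuda.op.eval.pybuda / pybuda.op.eval.buda, the decompose dispatcher strings keep their get_f_pybuda_* spelling, and test/common.hpp keeps the PybudaGraphTest alias. As a quick reference, the renamed C++ declarations, abbreviated from the hunks below (old names in comments):

// graph_lib/graph.hpp — IRLevel::IR_PYBUDA becomes IRLevel::IR_TT_FORGE
enum class IRLevel { IR_TT_FORGE, IR_BUDA, IR_CONSTEVAL };

// graph_lib/node.hpp — pybuda_id_ / pybuda_id() / set_pybuda_id() become:
NodeId tt_forge_id() const;            // id of the originating TTForge-level node, initialised to -1
void set_tt_forge_id(NodeId node_id);

// passes/decomposing_context.hpp — decompose_pybuda_graph() becomes:
std::vector<std::pair<graphlib::NodeId, graphlib::NodeId>> decompose_tt_forge_graph(
    Graph *graph, const char *dispatcher_name, std::shared_ptr<void> compiler_cfg);

// tt_torch_device/python_bindings.cpp — tt::PyBudaTensorDesc becomes tt::TTForgeTensorDesc,
// exposed to Python as "TTForgeTensorDesc".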
6 changes: 3 additions & 3 deletions pybuda/csrc/buda_passes.cpp
@@ -90,7 +90,7 @@ run_post_initial_graph_passes(graphlib::Graph *graph, py::object compiler_cfg_ob
passes::explicate_unsqueeze(graph);
passes::fuse_conv2d_bias(graph);

- auto inserted_node_id_mapping = decompose_pybuda_graph(graph, "get_f_pybuda_decompose", compiler_cfg);
+ auto inserted_node_id_mapping = decompose_tt_forge_graph(graph, "get_f_pybuda_decompose", compiler_cfg);
auto chip_id_assignments = passes::fracture(graph, fracture_groups);
return std::make_tuple(inserted_node_id_mapping, chip_id_assignments);
}
@@ -176,7 +176,7 @@ std::vector<std::pair<graphlib::NodeId, graphlib::NodeId>> run_post_optimize_dec
std::shared_ptr<void> compiler_cfg = make_shared_py_object(compiler_cfg_object);

passes::print_graph(graph, "POST_OPTIMIZE");
- auto inserted_node_id_mapping = decompose_pybuda_graph(graph, "get_f_pybuda_decompose_post_optimize", compiler_cfg);
+ auto inserted_node_id_mapping = decompose_tt_forge_graph(graph, "get_f_pybuda_decompose_post_optimize", compiler_cfg);

return inserted_node_id_mapping;
}
@@ -189,7 +189,7 @@ std::vector<std::pair<graphlib::NodeId, graphlib::NodeId>> run_post_autograd_gra

passes::print_graph(graph, "POST_AUTOGRAD");
lower_bwd_gather_ops(graph);
- return decompose_pybuda_graph(graph, "get_f_pybuda_decompose_post_autograd", compiler_cfg);
+ return decompose_tt_forge_graph(graph, "get_f_pybuda_decompose_post_autograd", compiler_cfg);
}

// ********** Run pre-lowering passes **********
2 changes: 1 addition & 1 deletion pybuda/csrc/graph_lib/graph.hpp
@@ -44,7 +44,7 @@ class EdgeAttributes;

enum class IRLevel
{
- IR_PYBUDA,
+ IR_TT_FORGE,
IR_BUDA,
IR_CONSTEVAL,
};
6 changes: 3 additions & 3 deletions pybuda/csrc/graph_lib/node.cpp
@@ -19,12 +19,12 @@ NodeId Node::id() const {
TT_ASSERT(unique_id_ >= 0);
return unique_id_;
}
- NodeId Node::pybuda_id() const {
- return pybuda_id_;
+ NodeId Node::tt_forge_id() const {
+ return tt_forge_id_;
}

void Node::set_id(NodeId node_id) { unique_id_ = node_id; }
- void Node::set_pybuda_id(NodeId node_id) { pybuda_id_ = node_id; }
+ void Node::set_tt_forge_id(NodeId node_id) { tt_forge_id_ = node_id; }
const std::string& Node::name() const { return name_; }
void Node::set_name(const std::string& name) { name_ = name; }

6 changes: 3 additions & 3 deletions pybuda/csrc/graph_lib/node.hpp
@@ -44,7 +44,7 @@ class Node {
private:
std::string name_;
NodeId unique_id_ = -1;
- NodeId pybuda_id_ = -1;
+ NodeId tt_forge_id_ = -1;

int padding_id = 0;

@@ -59,9 +59,9 @@
virtual ~Node() = default;

NodeId id() const;
- NodeId pybuda_id() const;
+ NodeId tt_forge_id() const;
void set_id(NodeId node_id);
- void set_pybuda_id(NodeId node_id);
+ void set_tt_forge_id(NodeId node_id);
const std::string& name() const;
void set_name(const std::string& name);
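The renamed member keeps its old role: a node in the lowered (Buda-level) graph can carry the id of the TTForge-level node it was lowered from, with the field initialised to -1. A minimal sketch of that round trip, condensed from the lower_to_buda_ops() and eval_graph() hunks later in this commit; the two helper functions here are hypothetical wrappers, only the calls inside them appear in the diff:

// Hypothetical helpers for illustration; the wrapped calls are taken from later hunks.
void record_origin(graphlib::Node *old_node, graphlib::Node *new_node)
{
    // Done while lowering a TTForge op to its Buda counterpart (see lower_to_buda_ops below).
    new_node->set_tt_forge_id(old_node->id());
}

graphlib::NodeId golden_lookup_id(graphlib::Graph *graph, graphlib::Node *node)
{
    // Golden tensors for a Buda-level graph are keyed by the original TTForge id
    // (mirrors eval_graph in graph_lib/python_bindings.cpp below).
    return (graph->get_ir_level() == graphlib::IRLevel::IR_BUDA) ? node->tt_forge_id() : node->id();
}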

2 changes: 1 addition & 1 deletion pybuda/csrc/graph_lib/node_types.hpp
@@ -438,7 +438,7 @@ class OpNode : public TaggedNode
{
return py_attr(attr_name)(args...).template cast<T>();
}
- IRLevel get_ir_level() const { return (node_type() == NodeType::kPyOp) ? IRLevel::IR_PYBUDA : IRLevel::IR_BUDA; }
+ IRLevel get_ir_level() const { return (node_type() == NodeType::kPyOp) ? IRLevel::IR_TT_FORGE : IRLevel::IR_BUDA; }
const std::string &op_name() const { return op_type_.op; }
const std::vector<OpType::Attr> &op_attrs() const { return op_type_.attr; }
void overwrite_op_attrs(std::vector<OpType::Attr> op_attrs) { op_type_.attr = op_attrs; }
17 changes: 9 additions & 8 deletions pybuda/csrc/graph_lib/python_bindings.cpp
@@ -58,7 +58,7 @@ py::object eval_input_bw(Node *node, py::object inputs, bool is_buda);
void GraphModule(py::module &m_graph)
{
py::class_<Graph>(m_graph, "Graph")
- .def(py::init([](std::string name) { return std::make_unique<Graph>(graphlib::IRLevel::IR_PYBUDA, name); }))
+ .def(py::init([](std::string name) { return std::make_unique<Graph>(graphlib::IRLevel::IR_TT_FORGE, name); }))
.def("clone", [](Graph &self) { return self.clone(); })
.def("get_node_name", [](const Graph &self, const graphlib::NodeId id) { return self.node_by_id(id)->name(); })
.def("get_name", [](const Graph &self) { return self.name(); })
@@ -618,7 +618,7 @@ py::object eval_op(graphlib::OpType type, std::vector<py::object> inputs, graphl
py::object eval_module;

switch (ir_level) {
- case graphlib::IRLevel::IR_PYBUDA: eval_module = py::module_::import("pybuda.op.eval.pybuda"); break;
+ case graphlib::IRLevel::IR_TT_FORGE: eval_module = py::module_::import("pybuda.op.eval.pybuda"); break;
case graphlib::IRLevel::IR_BUDA: eval_module = py::module_::import("pybuda.op.eval.buda"); break;
case graphlib::IRLevel::IR_CONSTEVAL: eval_module = py::module_::import("pybuda.op.eval.pybuda"); break;
}
@@ -670,7 +670,7 @@ py::object eval_relu(py::object tensor, graphlib::OpType type)
: "min";

graphlib::OpType relu("relu", {relu_threshold, relu_mode});
- tensor = eval_op(relu, inputs, graphlib::IRLevel::IR_PYBUDA);
+ tensor = eval_op(relu, inputs, graphlib::IRLevel::IR_TT_FORGE);
}
return tensor;
}
@@ -699,7 +699,7 @@ py::object eval_golden_transforms(graphlib::Node *node, py::object tensor, bool
//
if (!eval_for_output || (op_type.op != "reshape" && op_type.op != "transpose"))
{
- tensor = eval_op(op_type, {tensor}, graphlib::IRLevel::IR_PYBUDA);
+ tensor = eval_op(op_type, {tensor}, graphlib::IRLevel::IR_TT_FORGE);
}
}

@@ -718,7 +718,7 @@ void eval_partial_datacopy_golden_transforms(

for (auto const &op_type : golden_transforms)
{
- output_tensor = eval_op(op_type, {output_tensor}, graphlib::IRLevel::IR_PYBUDA);
+ output_tensor = eval_op(op_type, {output_tensor}, graphlib::IRLevel::IR_TT_FORGE);
}

if (ret.at(output_index).ptr() == nullptr)
@@ -728,7 +728,8 @@
else
{
graphlib::OpType overlay("add");
- ret.at(output_index) = eval_op(overlay, {ret.at(output_index), output_tensor}, graphlib::IRLevel::IR_PYBUDA);
+ ret.at(output_index) = eval_op(overlay, {ret.at(output_index), output_tensor}, graphlib::IRLevel::IR_TT_FORGE
+ );
}
}

@@ -1453,7 +1454,7 @@ eval_graph(
}
}

- auto golden_node_id = (graph->get_ir_level() == graphlib::IRLevel::IR_BUDA) ? node->pybuda_id() : node->id();
+ auto golden_node_id = (graph->get_ir_level() == graphlib::IRLevel::IR_BUDA) ? node->tt_forge_id() : node->id();
if (op_node->has_golden_id()) {
golden_node_id = op_node->golden_id(); // if a different intermediate node is used as a reference...
}
@@ -1468,7 +1469,7 @@
// Check if there's a gradient to check
if (gradient_edges.size() > 0) {
Node* producer = graph->node_by_id(gradient_edges.at(0).producer_node_id);
- auto node_id = (graph->get_ir_level() == graphlib::IRLevel::IR_BUDA) ? producer->pybuda_id() : producer->id();
+ auto node_id = (graph->get_ir_level() == graphlib::IRLevel::IR_BUDA) ? producer->tt_forge_id() : producer->id();
auto golden_fwd = intermediate_golden_tensors.find(node_id);
if (golden_fwd != intermediate_golden_tensors.end()) {
bool is_valid = is_gradient_comparison_valid(graph, gradient_edges.at(0));
2 changes: 1 addition & 1 deletion pybuda/csrc/graph_lib/utils.cpp
@@ -1946,7 +1946,7 @@ std::unique_ptr<Node> ConstEvalGraph::promote_node(
Graph *runtime_graph, Node *runtime_node, std::unique_ptr<Node> &&consteval_node_free)
{
TT_ASSERT(not runtime_graph or runtime_node);
- TT_ASSERT(not runtime_graph or runtime_graph->get_ir_level() == IRLevel::IR_PYBUDA);
+ TT_ASSERT(not runtime_graph or runtime_graph->get_ir_level() == IRLevel::IR_TT_FORGE);

graph_updated_since_autograd = true;

2 changes: 1 addition & 1 deletion pybuda/csrc/passes/decomposing_context.cpp
@@ -158,7 +158,7 @@ NodeContext DecomposingContext::tensor(std::shared_ptr<void> tensor, graphlib::S
return NodeContext(new_node);
}

- std::vector<std::pair<graphlib::NodeId, graphlib::NodeId>> decompose_pybuda_graph(
+ std::vector<std::pair<graphlib::NodeId, graphlib::NodeId>> decompose_tt_forge_graph(
Graph *graph, const char *dispatcher_name, std::shared_ptr<void> compiler_cfg)
{
std::vector<std::pair<graphlib::NodeId, graphlib::NodeId>> inserted_node_id_mapping;
2 changes: 1 addition & 1 deletion pybuda/csrc/passes/decomposing_context.hpp
@@ -64,7 +64,7 @@ class DecomposingContext
inline std::shared_ptr<void> get_compiler_cfg() { return compiler_cfg; }
};

- std::vector<std::pair<graphlib::NodeId, graphlib::NodeId>> decompose_pybuda_graph(
+ std::vector<std::pair<graphlib::NodeId, graphlib::NodeId>> decompose_tt_forge_graph(
Graph* graph, const char* dispatcher_name, std::shared_ptr<void> compiler_cfg);

} // namespace tt
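For orientation, this renamed entry point is what the passes in buda_passes.cpp above call at each decomposition stage. Only the C++ symbol changes; the Python dispatcher names passed in as strings keep their pybuda spelling in this commit. A condensed view of those call sites (the variable names here are illustrative):

// Condensed from the run_post_* passes in buda_passes.cpp above.
auto initial_ids       = decompose_tt_forge_graph(graph, "get_f_pybuda_decompose", compiler_cfg);
auto post_optimize_ids = decompose_tt_forge_graph(graph, "get_f_pybuda_decompose_post_optimize", compiler_cfg);
auto post_autograd_ids = decompose_tt_forge_graph(graph, "get_f_pybuda_decompose_post_autograd", compiler_cfg);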
2 changes: 1 addition & 1 deletion pybuda/csrc/passes/lowering_context.hpp
@@ -9,7 +9,7 @@

namespace tt {

- // Lowering context provide an API for Python to create lowered Buda ops, given a PyBuda op and
+ // Lowering context provide an API for Python to create lowered Buda ops, given a TTForge op and
// its operands.
using Graph = graphlib::Graph;
using Node = graphlib::Node;
4 changes: 2 additions & 2 deletions pybuda/csrc/passes/pre_placer_buda_passes.cpp
@@ -1142,7 +1142,7 @@ void constant_pre_broadcast(Graph *graph)
}
}

- // Convert PyBuda graph to Buda graph
+ // Convert TTForge graph to Buda graph
std::unique_ptr<Graph> lower_to_buda_ops(Graph *graph)
{

@@ -1187,7 +1187,7 @@ std::unique_ptr<Graph> lower_to_buda_ops(Graph *graph)
Node* old_node = graph->node_by_id(old_node_id);
Node* new_node = old_to_new.at(old_node);

- new_node->set_pybuda_id(old_node->id());
+ new_node->set_tt_forge_id(old_node->id());
copy_operand_edges_to_new_graph(graph, new_graph.get(), old_node, new_node, old_to_new, true, true);

if (old_node->node_type() == NodeType::kPyOp and new_node->node_type() == NodeType::kPyOp)
2 changes: 1 addition & 1 deletion pybuda/csrc/passes/pre_placer_buda_passes.hpp
@@ -68,7 +68,7 @@ void validate_buffering_queues(graphlib::Graph *graph);

void lower_fallback_data_formats(graphlib::Graph *graph, DataFormat fp32_fallback, bool fp32_acc_supported);

- // Convert PyBuda graph to Buda graph
+ // Convert TTForge graph to Buda graph
std::unique_ptr<Graph> lower_to_buda_ops(Graph *graph);

void apply_math_fidelity(graphlib::Graph *graph, const MathFidelity default_math_fidelity);
2 changes: 1 addition & 1 deletion pybuda/csrc/passes/tests/test_erase_inverse_ops.cpp
@@ -16,7 +16,7 @@ struct EraseInverseOps : testing::Test
EraseInverseOps()
{
// Two transposes feeding into eltwise which has a transpose after it
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

graphlib::Shape shape = graphlib::Shape::create({1, 1, 512, 160});
graphlib::Shape shapeT = graphlib::Shape::create({1, 1, 160, 512});
@@ -16,7 +16,7 @@ struct EraseUnnecessary4DSeq : testing::Test
EraseUnnecessary4DSeq()
{
// Initialize graph
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

// Graph definition
auto input_0 = create_input(*graph, "input_0", graphlib::Shape::create({1, NumOperands*58, 64, 64}));
2 changes: 1 addition & 1 deletion pybuda/csrc/passes/tests/test_fuse_pad_conv2d.cpp
@@ -14,7 +14,7 @@ struct FusePadConv2d : testing::Test
FusePadConv2d()
{
// Two transposes feeding into eltwise which has a transpose after it
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

auto in0_a = create_input(*graph, "in0_a", graphlib::Shape::create({1, 3, 513, 513}));
auto param0 = create_input(*graph, "param1", graphlib::Shape::create({32, 3, 3, 3}), graphlib::InputNodeType::Parameter);
@@ -14,7 +14,7 @@ struct FuseReshapeTransposeIntoHSlice : testing::Test
FuseReshapeTransposeIntoHSlice()
{
// Initialize graph
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

// Graph definition
auto input_0 = create_input(*graph, "input_0", graphlib::Shape::create({1, 1, 1, 32}));
@@ -203,7 +203,7 @@ struct FuseTransposeReshapeIntoHStack : testing::Test
FuseTransposeReshapeIntoHStack()
{
// Initialize graph
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

// Graph definition
auto input_0 = create_input(*graph, "input_0", graphlib::Shape::create({1, 64, 1, 32}));
@@ -425,7 +425,7 @@ struct FuseReshapeIntoVSlice : testing::Test
FuseReshapeIntoVSlice()
{
// Initialize graph
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

// Graph definition
auto input_0 = create_input(*graph, "input_0", graphlib::Shape::create({1, 32, 2048, 32}));
@@ -468,7 +468,7 @@ struct FuseReshapeIntoVStack : testing::Test
FuseReshapeIntoVStack()
{
// Initialize graph
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

// Graph definition
auto input_0 = create_input(*graph, "input_0", graphlib::Shape::create({1, 1024, 64, 32}));
10 changes: 5 additions & 5 deletions pybuda/csrc/passes/tests/test_link_past_cache_ios.cpp
@@ -16,7 +16,7 @@ struct WhisperPastCacheBase : testing::Test
WhisperPastCacheBase()
{
// Initialize graph
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

// Graph definition
// define input/weight nodes
@@ -95,7 +95,7 @@ struct WhisperPastCacheSubGraph : testing::Test
WhisperPastCacheSubGraph()
{
// Initialize graph
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

// Graph definition
// define input/weight nodes
@@ -173,7 +173,7 @@ struct T5PastCacheRotate : testing::Test
T5PastCacheRotate()
{
// Initialize graph
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

// Graph definition
// define input/weight nodes
@@ -278,7 +278,7 @@ struct Falcon40bPastCache : testing::Test
Falcon40bPastCache()
{
// Initialize graph
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

// Graph definition
// define input/weight nodes
@@ -378,7 +378,7 @@ struct Fuyu8bPastCache : testing::Test
Fuyu8bPastCache()
{
// Initialize graph
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

// Graph definition
// define input/weight nodes
4 changes: 2 additions & 2 deletions pybuda/csrc/passes/tests/test_move_index_to_mm_weights.cpp
@@ -14,7 +14,7 @@ struct Gpt2Split : testing::Test
Gpt2Split()
{
// Initialize graph
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

// Graph definition
auto weight = create_input(*graph, "weight", graphlib::Shape::create({768, 2304}), graphlib::InputNodeType::Parameter);
@@ -86,7 +86,7 @@ struct Fuyu8bSplit : testing::Test
Fuyu8bSplit()
{
// Initialize graph
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

// Graph definition
auto weight = create_input(*graph, "weight", graphlib::Shape::create({12288, 4096}), graphlib::InputNodeType::Parameter);
@@ -15,7 +15,7 @@ struct MoveSelectAfterMatmulOptional : testing::Test

MoveSelectAfterMatmulOptional()
{
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

// define input/param/const
auto act = create_input(*graph, "act", graphlib::Shape::create({1, 1, 6144, 21632}));
2 changes: 1 addition & 1 deletion pybuda/csrc/passes/tests/test_past_cache_ublock_order.cpp
@@ -14,7 +14,7 @@ struct PastCache : testing::Test
PastCache()
{
// Initialize graph
- graph = new graphlib::Graph(graphlib::IRLevel::IR_PYBUDA);
+ graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);

// Graph definition
auto pcache = create_input(*graph, "pcache", graphlib::Shape::create({1, 1, 416, 384}), graphlib::InputNodeType::Parameter);
2 changes: 1 addition & 1 deletion pybuda/csrc/test/common.hpp
@@ -277,7 +277,7 @@ class GraphTest : public ::testing::Test
std::unordered_map<std::string, int> op_name_id;
};

- using PybudaGraphTest = GraphTest<graphlib::IRLevel::IR_PYBUDA>;
+ using PybudaGraphTest = GraphTest<graphlib::IRLevel::IR_TT_FORGE>;
using BudaGraphTest = GraphTest<graphlib::IRLevel::IR_BUDA>;

inline DeviceConfig create_device_config(
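All of the test-suite changes above are the same one-line update to the fixture constructor, and common.hpp keeps the PybudaGraphTest alias name while retargeting it at the renamed IR level. A minimal fixture in the same style, for reference only (the fixture name is hypothetical and the destructor is added for tidiness):

// Hypothetical fixture mirroring the pattern in the test files above.
struct RenamedIrLevelTest : testing::Test
{
    graphlib::Graph *graph;

    RenamedIrLevelTest()
    {
        // Graphs for these passes are now tagged IR_TT_FORGE (formerly IR_PYBUDA).
        graph = new graphlib::Graph(graphlib::IRLevel::IR_TT_FORGE);
    }

    ~RenamedIrLevelTest() override { delete graph; }
};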
10 changes: 5 additions & 5 deletions pybuda/csrc/tt_torch_device/python_bindings.cpp
@@ -14,7 +14,7 @@ void TorchDeviceModule(py::module &m_torch_device)
m_torch_device.def("get_default_device", &tt::get_default_tt_device, py::return_value_policy::reference);
m_torch_device.def("get_available_devices", []() { return tt::get_available_tt_devices(); });

- py::class_<tt::PyBudaTensorDesc>(m_torch_device, "PyBudaTensorDesc")
+ py::class_<tt::TTForgeTensorDesc>(m_torch_device, "TTForgeTensorDesc")
.def(
py::init<
std::string const&,
@@ -25,10 +25,10 @@
py::arg("shape"),
py::arg("ptr") = -1,
py::arg("constant") = std::nullopt)
.def_readonly("name", &tt::PyBudaTensorDesc::name)
.def_readonly("shape", &tt::PyBudaTensorDesc::shape)
.def_readonly("ptr", &tt::PyBudaTensorDesc::ptr)
.def_readonly("constant", &tt::PyBudaTensorDesc::constant);
.def_readonly("name", &tt::TTForgeTensorDesc::name)
.def_readonly("shape", &tt::TTForgeTensorDesc::shape)
.def_readonly("ptr", &tt::TTForgeTensorDesc::ptr)
.def_readonly("constant", &tt::TTForgeTensorDesc::constant);

py::class_<tt::Workload, std::shared_ptr<tt::Workload>>(m_torch_device, "Workload")
.def_readonly("inputs", &tt::Workload::inputs)
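The binding rename implies the same rename on the C++ struct. Its definition is not part of this diff; for orientation only, a hypothetical reconstruction from the binding above — the member names and the ptr/constant defaults come from the .def_readonly and py::arg lines, while the field types are assumptions:

// Hypothetical reconstruction; only the names and the ptr = -1 / constant = std::nullopt
// defaults are visible in the binding above, the types are guesses.
struct TTForgeTensorDesc
{
    std::string name;
    std::vector<std::int64_t> shape;        // assumed shape representation
    int ptr = -1;
    std::optional<torch::Tensor> constant;  // assumed tensor type
};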